[r-cran-vgam] 27/63: Import Upstream version 0.8-6
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:29 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 24177ca6d68286754f972dddd9fe1d882f26f9a0
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:54 2017 +0100
Import Upstream version 0.8-6
---
BUGS | 9 +
DESCRIPTION | 22 +-
MD5 | 569 +--
NAMESPACE | 210 +-
NEWS | 77 +
R/aamethods.q | 124 +-
R/add1.vglm.q | 2 +-
R/attrassign.R | 2 +-
R/bAIC.q | 89 +-
R/build.terms.vlm.q | 2 +-
R/calibrate.q | 31 +-
R/cao.R | 4 +-
R/cao.fit.q | 373 +-
R/coef.vlm.q | 8 +-
R/cqo.R | 4 +-
R/cqo.fit.q | 48 +-
R/deviance.vlm.q | 2 +-
R/effects.vglm.q | 2 +-
R/family.aunivariate.R | 63 +-
R/family.basics.R | 58 +-
R/family.binomial.R | 518 +-
R/family.bivariate.R | 302 +-
R/family.categorical.R | 365 +-
R/family.censored.R | 40 +-
R/family.circular.R | 10 +-
R/family.exp.R | 36 +-
R/family.extremes.R | 470 +-
R/family.fishing.R | 10 +-
R/family.functions.R | 2 +-
R/family.genetic.R | 649 +--
R/family.glmgam.R | 551 ++-
R/family.loglin.R | 14 +-
R/family.math.R | 2 +-
R/family.mixture.R | 105 +-
R/family.nonlinear.R | 220 +-
R/family.normal.R | 508 +-
R/family.others.R | 92 +-
R/family.positive.R | 815 ++--
R/family.qreg.R | 516 +-
R/family.quantal.R | 41 +-
R/family.rcam.R | 189 +-
R/family.rcqo.R | 107 +-
R/family.robust.R | 256 +-
R/family.rrr.R | 423 +-
R/family.survival.R | 66 +-
R/family.ts.R | 400 +-
R/family.univariate.R | 5114 +++++++++++---------
R/family.vglm.R | 6 +-
R/family.zeroinf.R | 4887 ++++++++++++-------
R/fittedvlm.R | 6 +-
R/formula.vlm.q | 35 +-
R/generic.q | 2 +-
R/links.q | 159 +-
R/logLik.vlm.q | 8 +-
R/lrwaldtest.R | 597 +++
R/model.matrix.vglm.q | 11 +-
R/mux.q | 2 +-
R/nobs.R | 28 +-
R/plot.vglm.q | 123 +-
R/predict.vgam.q | 4 +-
R/predict.vglm.q | 2 +-
R/predict.vlm.q | 24 +-
R/print.vglm.q | 259 +-
R/print.vlm.q | 119 +-
R/qrrvglm.control.q | 81 +-
R/qtplot.q | 100 +-
R/residuals.vlm.q | 10 +-
R/rrvglm.R | 4 +-
R/rrvglm.control.q | 112 +-
R/rrvglm.fit.q | 20 +-
R/s.q | 2 +-
R/s.vam.q | 2 +-
R/smart.R | 2 +-
R/step.vglm.q | 2 +-
R/summary.vgam.q | 31 +-
R/summary.vglm.q | 47 +-
R/summary.vlm.q | 192 +-
R/uqo.R | 304 +-
R/vgam.R | 18 +-
R/vgam.control.q | 39 +-
R/vgam.fit.q | 34 +-
R/vgam.match.q | 2 +-
R/vglm.R | 2 +-
R/vglm.control.q | 16 +-
R/vglm.fit.q | 32 +-
R/vlm.R | 9 +-
R/vlm.wfit.q | 96 +-
R/vsmooth.spline.q | 73 +-
R/zzz.R | 2 +-
data/alclevels.rda | Bin 561 -> 561 bytes
data/alcoff.rda | Bin 557 -> 558 bytes
data/auuc.rda | Bin 243 -> 243 bytes
data/azprocedure.rda | Bin 18476 -> 0 bytes
data/backPain.rda | Bin 602 -> 469 bytes
data/backPain.txt.gz | Bin 463 -> 451 bytes
data/bmi.nz.txt.xz | Bin 0 -> 5256 bytes
data/bminz.txt.gz | Bin 6259 -> 0 bytes
data/car.all.rda | Bin 8214 -> 6944 bytes
data/chest.nz.txt.bz2 | Bin 0 -> 484 bytes
data/chestnz.txt.gz | Bin 559 -> 0 bytes
data/{nzc.txt.gz => chinese.nz.txt.gz} | Bin
data/crashbc.rda | Bin 387 -> 388 bytes
data/crashf.rda | Bin 353 -> 355 bytes
data/crashi.rda | Bin 503 -> 501 bytes
data/crashmc.rda | Bin 397 -> 397 bytes
data/crashp.rda | Bin 391 -> 389 bytes
data/crashtr.rda | Bin 375 -> 375 bytes
data/crime.us.rda | Bin 0 -> 3977 bytes
data/datalist | 47 +
data/fibre15.rda | Bin 245 -> 245 bytes
data/fibre1dot5.rda | Bin 296 -> 295 bytes
data/finney44.rda | Bin 208 -> 207 bytes
data/gala.rda | Bin 1045 -> 1050 bytes
data/gew.txt.gz | Bin 531 -> 530 bytes
data/grain.us.txt.bz2 | Bin 0 -> 883 bytes
data/hspider.rda | Bin 1311 -> 1343 bytes
data/hued.rda | Bin 410 -> 410 bytes
data/huie.rda | Bin 417 -> 416 bytes
data/hunua.txt.bz2 | Bin 0 -> 1413 bytes
data/hunua.txt.gz | Bin 1894 -> 0 bytes
data/huse.rda | Bin 322 -> 321 bytes
data/leukemia.rda | Bin 327 -> 327 bytes
data/marital.nz.rda | Bin 0 -> 10424 bytes
data/mmt.rda | Bin 5973 -> 4223 bytes
data/nzmarital.rda | Bin 25309 -> 0 bytes
data/pneumo.rda | Bin 263 -> 263 bytes
data/rainfall.rda | Bin 17147 -> 11062 bytes
data/ruge.rda | Bin 255 -> 255 bytes
data/toxop.rda | Bin 465 -> 472 bytes
data/ugss.rda | Bin 15160 -> 11609 bytes
data/uscrime.rda | Bin 3967 -> 0 bytes
data/usgrain.txt.gz | Bin 1077 -> 0 bytes
data/venice.rda | Bin 1210 -> 988 bytes
data/venice90.rda | Bin 13037 -> 8056 bytes
data/waitakere.txt.bz2 | Bin 0 -> 2253 bytes
data/waitakere.txt.gz | Bin 3090 -> 0 bytes
data/wffc.indiv.rda | Bin 2867 -> 2567 bytes
data/wffc.nc.rda | Bin 5671 -> 4265 bytes
data/wffc.rda | Bin 13564 -> 10245 bytes
data/wffc.teams.rda | Bin 541 -> 540 bytes
data/xs.nz.rda | Bin 0 -> 221352 bytes
inst/doc/categoricalVGAM.Rnw | 18 +-
inst/doc/categoricalVGAM.pdf | Bin 678455 -> 677826 bytes
inst/doc/jss.bst | 1647 -------
inst/doc/jss.cls | 473 --
man/ABO.Rd | 13 +-
man/BratUC.Rd | 96 -
man/Inv.gaussian.Rd | 2 +-
man/Qvar.Rd | 32 +-
man/SurvS4-class.Rd | 13 +-
man/SurvS4.Rd | 104 +-
man/VGAM-package.Rd | 16 +-
man/alaplace3.Rd | 4 +-
man/amlnormal.Rd | 32 +-
man/benfUC.Rd | 20 +-
man/benini.Rd | 3 +-
man/beniniUC.Rd | 2 +-
man/beta.ab.Rd | 20 +-
man/betaII.Rd | 28 +-
man/betabinomial.ab.Rd | 12 +-
man/bilogistic4.Rd | 16 +-
man/binom2.or.Rd | 54 +-
man/binomialff.Rd | 4 +-
man/bivgamma.mckay.Rd | 2 +-
man/{bminz.Rd => bmi.nz.Rd} | 26 +-
man/brat.Rd | 152 +-
man/bratUC.Rd | 100 +
man/bratt.Rd | 162 +-
man/calibrate.qrrvglm.Rd | 41 +-
man/calibrate.qrrvglm.control.Rd | 5 +-
man/cdf.lmscreg.Rd | 2 +-
man/cenpoisson.Rd | 28 +-
man/{chestnz.Rd => chest.nz.Rd} | 19 +-
man/{nzc.Rd => chinese.nz.Rd} | 10 +-
man/cqo.Rd | 72 +-
man/{uscrime.Rd => crime.us.Rd} | 14 +-
man/cumulative.Rd | 59 +-
man/dagum.Rd | 18 +-
man/{DagumUC.Rd => dagumUC.Rd} | 27 +-
man/deplot.lmscreg.Rd | 2 +-
man/eunifUC.Rd | 7 +-
man/exppoisson.Rd | 9 +-
man/fgm.Rd | 11 +-
man/fgmUC.Rd | 16 +-
man/fisk.Rd | 20 +-
man/{FiskUC.Rd => fiskUC.Rd} | 29 +-
man/fittedvlm.Rd | 4 +-
man/frechet.Rd | 2 +-
man/frechetUC.Rd | 2 +-
man/garma.Rd | 69 +-
man/genbetaII.Rd | 46 +-
man/gengamma.Rd | 2 +-
man/genpoisson.Rd | 4 +-
man/geometric.Rd | 28 +-
man/golf.Rd | 62 +-
man/gpd.Rd | 25 +-
man/{usgrain.Rd => grain.us.Rd} | 8 +-
man/grc.Rd | 56 +-
man/gumbelIbiv.Rd | 8 +-
man/gumbelUC.Rd | 2 +-
man/hyperg.Rd | 41 +-
man/hzetaUC.Rd | 2 +-
man/iam.Rd | 21 +-
man/inv.gaussianff.Rd | 17 +-
man/invlomax.Rd | 34 +-
man/{InvlomaxUC.Rd => invlomaxUC.Rd} | 29 +-
man/invparalogistic.Rd | 32 +-
man/{InvparalogisticUC.Rd => invparalogisticUC.Rd} | 30 +-
man/lms.bcg.Rd | 10 +-
man/lms.bcn.Rd | 27 +-
man/lms.yjn.Rd | 6 +-
man/loge.Rd | 2 +-
man/logistic.Rd | 2 +-
man/loglaplace.Rd | 71 +-
man/loglog.Rd | 16 +-
man/lognormal.Rd | 2 +-
man/lomax.Rd | 26 +-
man/{LomaxUC.Rd => lomaxUC.Rd} | 25 +-
man/lrtest.Rd | 167 +
man/margeff.Rd | 14 +-
man/{nzmarital.Rd => marital.nz.Rd} | 30 +-
man/maxwell.Rd | 18 +-
man/maxwellUC.Rd | 3 +-
man/mbinomial.Rd | 75 +-
man/mccullagh89.Rd | 6 +-
man/mix2exp.Rd | 42 +-
man/mix2normal1.Rd | 7 +-
man/morgenstern.Rd | 12 +-
man/multinomial.Rd | 121 +-
man/nbcanlink.Rd | 174 +
man/nbolf.Rd | 9 +-
man/negbinomial.Rd | 58 +-
man/negbinomial.size.Rd | 134 +
man/normal1.Rd | 46 +-
man/notdocumentedyet.Rd | 98 +-
man/olympic.Rd | 2 +-
man/ordpoisson.Rd | 2 +-
man/paralogistic.Rd | 22 +-
man/{ParalogisticUC.Rd => paralogisticUC.Rd} | 26 +-
man/pareto1.Rd | 39 +-
man/paretoIV.Rd | 36 +-
man/{ParetoIVUC.Rd => paretoIVUC.Rd} | 0
man/plotdeplot.lmscreg.Rd | 2 +-
man/plotqtplot.lmscreg.Rd | 2 +-
man/plotvgam.Rd | 69 +-
man/plotvgam.control.Rd | 8 +-
man/polf.Rd | 52 +-
man/posgeomUC.Rd | 107 +
man/posnegbinomial.Rd | 69 +-
man/pospoisUC.Rd | 7 +-
man/pospoisson.Rd | 49 +-
man/propodds.Rd | 2 +-
man/qrrvglm.control.Rd | 31 +-
man/qtplot.lmscreg.Rd | 2 +-
man/{RayleighUC.Rd => rayleighUC.Rd} | 0
man/rrar.Rd | 63 +-
man/rrvglm.Rd | 4 +-
man/sinmad.Rd | 35 +-
man/{SinmadUC.Rd => sinmadUC.Rd} | 18 +-
man/toxop.Rd | 10 +-
man/tparetoUC.Rd | 9 +-
man/trplot.qrrvglm.Rd | 16 +-
man/ugss.Rd | 6 +-
man/undocumented-methods.Rd | 60 +-
man/uqo.Rd | 64 +-
man/uqo.control.Rd | 14 +-
man/vgam-class.Rd | 3 +
man/vgam.Rd | 76 +-
man/vgam.control.Rd | 35 +-
man/vglm.Rd | 76 +-
man/vglm.control.Rd | 140 +-
man/vonmises.Rd | 11 +-
man/weibull.Rd | 2 +-
man/weightsvglm.Rd | 2 +-
man/xs.nz.Rd | 420 ++
man/yip88.Rd | 2 +-
man/zabinomUC.Rd | 81 +
man/zabinomial.Rd | 135 +
man/zageomUC.Rd | 81 +
man/zageometric.Rd | 142 +
man/zanegbinUC.Rd | 37 +-
man/zanegbinomial.Rd | 80 +-
man/zapoisUC.Rd | 36 +-
man/zapoisson.Rd | 113 +-
man/zero.Rd | 2 +-
man/zibinomUC.Rd | 48 +-
man/zibinomial.Rd | 59 +-
man/zigeomUC.Rd | 33 +-
man/zigeometric.Rd | 55 +-
man/zinegbinUC.Rd | 65 +-
man/zinegbinomial.Rd | 77 +-
man/zipf.Rd | 52 +-
man/zipoisUC.Rd | 62 +-
man/zipoisson.Rd | 87 +-
src/caqo3.c | 133 +-
src/gautr.c | 2 +-
src/lms.f | 6 +-
src/muxr.c | 72 +-
src/rgam3.c | 13 +
src/vgam3.c | 4 +-
src/vmux3.c | 30 +-
301 files changed, 16797 insertions(+), 12537 deletions(-)
diff --git a/BUGS b/BUGS
index 7edde98..a31fe77 100755
--- a/BUGS
+++ b/BUGS
@@ -1,5 +1,14 @@
Here is a list of known bugs.
+2011-12
+
+VGAM version 0.8-4 said it needed R version 2-11.1 or later.
+But really, R version 2-13.0 or later is needed.
+This is because the generic nobs() was not defined properly.
+Another fix is to install the (latest) prerelease version
+at http://www.stat.auckland.ac.nz/~yee/VGAM/prerelease
+
+
2010-04-12
diff --git a/DESCRIPTION b/DESCRIPTION
index daff049..ef4d161 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,20 +1,22 @@
Package: VGAM
-Version: 0.8-4
-Date: 2011-11-04
+Version: 0.8-6
+Date: 2012-02-27
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
-Depends: R (>= 2.11.1), splines, methods, stats, stats4
+Depends: R (>= 2.14.0), splines, methods, stats, stats4
+Suggests: MASS
Description: Vector generalized linear and additive models, and
- associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
- Reduced-Rank VGAMs). This package fits many models and
- distribution by maximum likelihood estimation (MLE) or
- penalized MLE. Also fits constrained ordination models in
- ecology.
+ associated models (Reduced-Rank VGLMs, Quadratic RR-VGLMs,
+ Reduced-Rank VGAMs). This package fits many models and
+ distribution by maximum likelihood estimation (MLE) or
+ penalized MLE. Also fits constrained ordination models in
+ ecology.
License: GPL-2
+Imports: methods, stats, stats4
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: 2011-11-04 02:00:36 UTC; tyee001
+Packaged: 2012-02-27 06:43:51 UTC; ripley
Repository: CRAN
-Date/Publication: 2011-11-04 07:32:26
+Date/Publication: 2012-02-27 06:46:06
diff --git a/MD5 b/MD5
index 6e799f1..7d6d4d4 100644
--- a/MD5
+++ b/MD5
@@ -1,139 +1,141 @@
-a044e2c802f4451fee8abdf55163e99b *BUGS
-cef871dd6555952f7c56e4befa1b1fc1 *DESCRIPTION
+60b13c57b66bb77e65321c5c0a3b1dab *BUGS
+9983282911eb6c26048f19ae07b5eb78 *DESCRIPTION
dd959d3a0cd680792122813a7d58d506 *DISCLAIMER
-ffc4775ec72cd9429aa3ba1e1c7b6a4b *NAMESPACE
-f0acc91eb36c7d77927c063d896e2177 *NEWS
-689d9e2adac8be393f6c99ceb231c574 *R/aamethods.q
-6e33af4dffcedfb8f5f8731c8af9a09b *R/add1.vglm.q
-295daee86a2890f2a6dc0c220cfd5ccb *R/attrassign.R
-a8ea0c12322404e93d896047d9fe2580 *R/bAIC.q
-5f4a87b13f0ced19c72d7f02555be6cd *R/build.terms.vlm.q
-4decb84c722995c2f054c85a0d592afb *R/calibrate.q
-72d86735dfa5eaf44d3c19b7864c42d0 *R/cao.R
-1c1602b6407924c7314546e57a46e1ce *R/cao.fit.q
-f0179cd7b5b0929e20d34f1f8c51f828 *R/coef.vlm.q
-2fcc5dae605f3cc680c93ba5a87aebca *R/cqo.R
-f2513612cbc71e6fc40d9ecc46578a13 *R/cqo.fit.q
-892125f19d7b3e1d00528e77c4e01af0 *R/deviance.vlm.q
-54a92d9b9988e5b21564f7ba463b816a *R/effects.vglm.q
-d48651321ada7f10a098a5ce6377f861 *R/family.aunivariate.R
-f0238dc328145e78234bd85e43a4607d *R/family.basics.R
-f1a4e8cb35daac0e85f34911ac23f3c9 *R/family.binomial.R
-bb4c61a751490a663a5bf3f05ab64c27 *R/family.bivariate.R
-95900e213c3d43abc198b198b24724bf *R/family.categorical.R
-a77a755af8f72cbea842c9ad6d4bb822 *R/family.censored.R
-8d6813cb126e055ecf6bc8ae00618fda *R/family.circular.R
-ee035a253c2adc60595d50d4b156db20 *R/family.exp.R
-4d65c6e54a4e027286b6a5e1299610b9 *R/family.extremes.R
-66a4fac49fd81a28f72296a22299854a *R/family.fishing.R
-b0a812cdfa23b7023ea6797f14da7f61 *R/family.functions.R
-3bc6f870614b8e4900cbbc60171d236d *R/family.genetic.R
-0f1b25d25fdd253a3515dd5b60640840 *R/family.glmgam.R
-4c195e9251e585a004feff69ba818e79 *R/family.loglin.R
-eb1eb7fd0ce4a3c30f9fda93eff1cbe9 *R/family.math.R
-695fa168795ad9d8ad268e8c70b7f23b *R/family.mixture.R
-d1566b7eefdf2318eb3cab8c69ea1b7d *R/family.nonlinear.R
-4318228aa87da125b19e77e70776b996 *R/family.normal.R
-e80de639e5bd5b5d40f30300e40f08b6 *R/family.others.R
-5268e2409e9e988f21d018ebaed05d4a *R/family.positive.R
-df6d8bec5d84002a275c426a9cb2ab3d *R/family.qreg.R
-8c4844e8b09774e6011961a6b035bc8a *R/family.quantal.R
-f2192aaeafe73959410d11f306ec550b *R/family.rcam.R
-6205244d6bca354e9099465175a0c41f *R/family.rcqo.R
-5bf1e3a44ae9b0f3be559fe671fbb5ba *R/family.robust.R
-99676cb382ad33c42d44d99ca6bcf115 *R/family.rrr.R
-e78e92666da7d5ebc729e95f5e27a70a *R/family.survival.R
-9d326d2196256efc4440a38ed9c08915 *R/family.ts.R
-644d480b204af8dab246e33cb3560d7c *R/family.univariate.R
-2a0447d4fffc801c49940f6844c29a8e *R/family.vglm.R
-409ffe41def91bed78e7729b3b42d4ba *R/family.zeroinf.R
-be801764c05394abeb6fa9ef72c004c3 *R/fittedvlm.R
-bf1effc73308ce990f35e166d1afa3e9 *R/formula.vlm.q
-e6753656b1a4eda8de5a01f673f017b8 *R/generic.q
-5aae507d37896f32b306cef3104f67df *R/links.q
-e3e4f2b6154a1e9e5809dd9170a3c08f *R/logLik.vlm.q
-8e771f28093207d25411322a6dfbcf60 *R/model.matrix.vglm.q
-af360ccea29695578291731572c25935 *R/mux.q
-1d68413c98a9b14048e915987e8b6f2b *R/nobs.R
-0cf5cf87340841babab0079f2d0be4f8 *R/plot.vglm.q
-f4f369f39b03bd02949026067385cb68 *R/predict.vgam.q
-9dcabeffa5e38725330428c42c365e3d *R/predict.vglm.q
-4524809f86ca0397363d5084d8c96803 *R/predict.vlm.q
-a98e9c0e9f8d862ef005208719c35ef4 *R/print.vglm.q
-880145f2d76445d0c6aeb324d7411714 *R/print.vlm.q
-e3f7546eae138b8cee983ea0d00a242d *R/qrrvglm.control.q
-4d09fca82a261a6a49a8d47044c33917 *R/qtplot.q
-59647a55c435bec7550c5ed6fda02e35 *R/residuals.vlm.q
-cf6429232a3a89373ec7c63a98f0fb75 *R/rrvglm.R
-7f7d3e3bad38646f5bf9a34c8fb6ab32 *R/rrvglm.control.q
-84e079597b6e18ad2740c6bee1e6f6a8 *R/rrvglm.fit.q
-c5c9f637a7ad26b9a4162a4c6eec99c2 *R/s.q
-169e3c35b99e51037aaa351d1bc1921d *R/s.vam.q
-caf8310bbeb408386ae17254e5bc84f1 *R/smart.R
-c5f98d003fe18794fa1ea199a0f3765d *R/step.vglm.q
-c663f2fa6d27dfe128f95e7f23d3dbec *R/summary.vgam.q
-ffc37c249d39e1553e80903434b0b425 *R/summary.vglm.q
-d3c08c40209f1793ea3f3506f4ecd59e *R/summary.vlm.q
-e75d30e13194b481eee08f6389fdc33a *R/uqo.R
-4ab5232d0b690a132c8c4d13ea5ce945 *R/vgam.R
-fe27c34e65f4ca6e90ad7103918c5830 *R/vgam.control.q
-64d9ca84c782058abb616078435f1bae *R/vgam.fit.q
-4ed0000ef2eb45e49a58eebff882efe6 *R/vgam.match.q
-09a7d574f71c2b432543457c877b8846 *R/vglm.R
-14649525df31aeb0ff40371a10f2af94 *R/vglm.control.q
-361a0ff8019c7f39de8b60e80e593ea0 *R/vglm.fit.q
-21e25b67033e2ff1ff137f3af345af15 *R/vlm.R
-4c6b53a8347f9920b1c83a5a1154f9fa *R/vlm.wfit.q
-324ecf9d8340de85927bf438c77c16d7 *R/vsmooth.spline.q
-464a864c23a9b7f9ea842f9afaad6a38 *R/zzz.R
-113aeab933301f755800d2e8cac8d32f *data/alclevels.rda
-9572e33b0e02dad63ade980ad9c15090 *data/alcoff.rda
-cebebfb94f82451e02607c06a4534c5e *data/auuc.rda
-2148c2429af14dc540b92e593a020edc *data/azprocedure.rda
-9c544fb74250dcdfb544001b04c6efc1 *data/backPain.rda
-57d3964752848e70454460e8eca354e8 *data/backPain.txt.gz
-a2d105f68098112918dd96d07cbcfa43 *data/bminz.txt.gz
-4c681b89033cf4586d992845b6f43217 *data/car.all.rda
-4f5ce642e2f8e717aca6b60a15d7fdbb *data/chestnz.txt.gz
+ffdf65e3366ca30fd782477833b83e48 *NAMESPACE
+0834f6f2fc163691846e9f3ed30775df *NEWS
+dd21b3270922941c5f772fcbc0fdbb53 *R/aamethods.q
+b3239d3ffdffe1de9581354b05d66016 *R/add1.vglm.q
+95904ca107a7d51b5a7a6603304e962b *R/attrassign.R
+fa7ea29619a00514f855a075070edae7 *R/bAIC.q
+73b6069b0e12d23150dcfc61134aeed5 *R/build.terms.vlm.q
+56bed730c52b3d44ff151e0b6db57be6 *R/calibrate.q
+b09327ef1094ac0ff74c3283155ea3fb *R/cao.R
+0bdd385d0a1232292595d702d2ae167d *R/cao.fit.q
+9e68dd0424ac75874e253c22099ce952 *R/coef.vlm.q
+a3f5ad1bd124d07a69c74b7f917a9a58 *R/cqo.R
+e94fae353c86c2ece43792e9c2f777a0 *R/cqo.fit.q
+7b8cf0f581c96ff945342e9b9a86bd48 *R/deviance.vlm.q
+80861c2c2454b9c298e11cbadc431056 *R/effects.vglm.q
+e4f630778920ecc97ae9e561eeb651f9 *R/family.aunivariate.R
+d05c94ac56886d1455d4f47322384a0e *R/family.basics.R
+3d4b21ad6ce25d7ff8a32efdfedbce78 *R/family.binomial.R
+37abad6c4eaa05fea096bb1f4e3da8cc *R/family.bivariate.R
+011da662eaecf74a957909307678476d *R/family.categorical.R
+aa067e1804876a7cb4b091aa328af510 *R/family.censored.R
+ba2f3c8fe4b2e8ba26973027fa2d676d *R/family.circular.R
+e890b1d9e0081a2ec9972ded68a83c4c *R/family.exp.R
+35cc6d95647629042edfc020e2e128e5 *R/family.extremes.R
+938226f9a34e4f6fd80108a9a89c2651 *R/family.fishing.R
+e2045b675e24fef59298e82a20604d2d *R/family.functions.R
+1a2e09d03675bb6dff6e61cb2e7c76d5 *R/family.genetic.R
+e4443878ff13242dd5d4591512003cd8 *R/family.glmgam.R
+3c3fabbb223815ee25a4a5c62c2e3c7b *R/family.loglin.R
+e159913225b326c326e2c8bffde57dcc *R/family.math.R
+5bb0c3ac9343bbf891bfa432f93dea17 *R/family.mixture.R
+0fa3e3d6a3138cc8eac0f320a389ae11 *R/family.nonlinear.R
+bc8a8da5dded0eae4286c2d009c0a227 *R/family.normal.R
+9135d29542ca7103b3a9155b79f65f94 *R/family.others.R
+05a4cbbc15d57f0338680ab0fcc9a314 *R/family.positive.R
+99a2aa62776d03bc8729be73a2f243c1 *R/family.qreg.R
+fdca4da3063c9acfb9c5abe845c333af *R/family.quantal.R
+17bae8bf7bbd291cc85c48057a975ee1 *R/family.rcam.R
+f9ae840ae9e77833947050013e493a29 *R/family.rcqo.R
+edc9ff129ca91ba61a4f885131870024 *R/family.robust.R
+f1bbd70c6e43f075af67a6dc9a867e39 *R/family.rrr.R
+4b07bd955d32ceb26d12eb62ce121867 *R/family.survival.R
+c737f9809bc0727f6733906db4fc8f9e *R/family.ts.R
+501352c1bf58a75dc2f4dab35af013ad *R/family.univariate.R
+ba86e91c886455a19a223ab62d72910b *R/family.vglm.R
+1b2fccb12954c194140ad6d289666328 *R/family.zeroinf.R
+c4209518badc8f38d39cd2d7b8405d24 *R/fittedvlm.R
+c6167af1886c3d0340b9b41a66f8d1a9 *R/formula.vlm.q
+33aa96487bc94130897db4ca82ec9559 *R/generic.q
+c7ee34c4632f6fe2c8dfc13b9050071c *R/links.q
+0aef958fdd7db1b20ee26c818807d2c1 *R/logLik.vlm.q
+12c9c7e7246afe10034cdd60a90a73d0 *R/lrwaldtest.R
+5a4b0002356f5155a223d37c4db23cdf *R/model.matrix.vglm.q
+1d7b6abb53d483524b70173b582cc60a *R/mux.q
+939ddbb40d567790aba7c2e0fbf84ad2 *R/nobs.R
+0cb5b755110ed2ada83d8621c94db5ee *R/plot.vglm.q
+01e3395c8bf121d1c2a648942397411c *R/predict.vgam.q
+b72b8be0e0cccf16319dd8faf03f90c7 *R/predict.vglm.q
+2700d35af398314cbfc29efb3eda71da *R/predict.vlm.q
+d56618bb580ea017562efbb27aa69ee5 *R/print.vglm.q
+04a7a1cc3e40dc1b4e861e191e91edfd *R/print.vlm.q
+9fb95687e7080c3b216ee7c79cb1be0a *R/qrrvglm.control.q
+5b4ac13de461e108659579b651cc8d09 *R/qtplot.q
+f9a4d1bd1d2eb0a93d46d6043909c01d *R/residuals.vlm.q
+144643456f5d88647df94b5421bd850a *R/rrvglm.R
+9029b2cf71b02f40d8b8d1424ee044e4 *R/rrvglm.control.q
+a64fe1a10cc52a121f1af8f410953e4c *R/rrvglm.fit.q
+b7b95cdd6591250161f2c93699983466 *R/s.q
+b0970500631acb1935d4415e6d6054dc *R/s.vam.q
+fe6132afff206af8d15031d48a880a4d *R/smart.R
+7ce45be4048ac6023d1bfcd703f80391 *R/step.vglm.q
+44fc620e4e847fee0a9ce1365f3ffd27 *R/summary.vgam.q
+729c7eafc395a92fb06276b8fcc064e6 *R/summary.vglm.q
+242d83e29ebcdbebf30dd6e1b256aaa6 *R/summary.vlm.q
+753dad5450557ae57cbff2cb4c229b3a *R/uqo.R
+f223707b00020c752536ef95ea7150bb *R/vgam.R
+6cc23d07c2031dcad93254f092118ce9 *R/vgam.control.q
+726aa9b28c1fb045753253af10152e71 *R/vgam.fit.q
+a4c1ebcffe0e1e65daaff53ae414da4c *R/vgam.match.q
+a55a151f6dfe2b990f53bda1ba05d7db *R/vglm.R
+6a90cb8ef0c7344688f28b8c1fa91d23 *R/vglm.control.q
+8848400124bd09e82f5505d5d0f4683a *R/vglm.fit.q
+38aeb51b3ed4d9a4a5f1af56da21b32b *R/vlm.R
+e76f5e142ff6bc7ad92fc0eece93bb9d *R/vlm.wfit.q
+991035d00cfe83ca882204627e8c226f *R/vsmooth.spline.q
+c1c2fce6995f00d9ec512f818662a7c1 *R/zzz.R
+20af71b6e9c7941180f5cac65b3a2008 *data/alclevels.rda
+aab70e1d478ff0276394f41092ccf175 *data/alcoff.rda
+c807bdeb53b4f84166301dda661a9489 *data/auuc.rda
+5106b445716171a69266916bc7e055e6 *data/backPain.rda
+4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
+e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
+94632d36530fafb1043397ba0db32802 *data/car.all.rda
+b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
+0f45f6779a3c3583f4edf68da4045509 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-9d7140ab7647878741798d566ded66e1 *data/crashbc.rda
-74e92674a14dc48e14f1d92d7b12b930 *data/crashf.rda
-b276773c2ca97585cbd42fc618425d7a *data/crashi.rda
-57cefc814d96653c54ca38fffdb7f0da *data/crashmc.rda
-85be54ceaaea93a106648b0d12fa3d38 *data/crashp.rda
-b5eba07396e4a31c4c1e4f7383c76cd3 *data/crashtr.rda
+671d8bbafbccc6f6b9a4b70cec7df06a *data/crashbc.rda
+7b344ca8042a5c2e2d54e992dccca638 *data/crashf.rda
+4585390d90110664136b1a51344427d4 *data/crashi.rda
+c0db241aef6494f367b41abf8e1af762 *data/crashmc.rda
+b88ad849a8715489bc8180d9643c735f *data/crashp.rda
+94fa571b96154f0837f7bac9a21ea780 *data/crashtr.rda
+8d41cbf9902f7253a4b6d783ed4b0175 *data/crime.us.rda
+b94c51afb0b6dbb8bfda5dc65592ab5c *data/datalist
08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-104c9a023954dfdd7ce65b71b81ee459 *data/fibre15.rda
-940778283f82556333c3a8cc61499b5f *data/fibre1dot5.rda
-11765a6627d3f05131c5cffdb0a987bc *data/finney44.rda
-4d4e1c1057c6e58258bf96c01cdc1afd *data/gala.rda
-586cc1b6764d55ecb3798b84218f698e *data/gew.txt.gz
-3a9f7d1fbbebca426a24442cd04107bc *data/hspider.rda
-6a6e83076d85684d065e62399cfa1e40 *data/hued.rda
-815f4936c0ecd60ba86a867338da863a *data/huie.rda
-8881c57a0dc513549828cf967e43e454 *data/hunua.txt.gz
-17d7fbd51b4711afc4752b14a6315d1e *data/huse.rda
-5dc05e8a2a5765055956e0abc58f5fff *data/leukemia.rda
+56de06b778fd5bf4e3e764738ad964b6 *data/fibre15.rda
+ea4644affa39d5565a6c3541340cdaf5 *data/fibre1dot5.rda
+cc6a260748efa38f06589f1cdbeee06a *data/finney44.rda
+36cc34dd8ea5279d55a4ff4a5858e940 *data/gala.rda
+8508a1cb5a09b65616ed9dfe1fc7a7a9 *data/gew.txt.gz
+bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
+353f60a0a152a6680dd73e18313ce351 *data/hspider.rda
+7bdca06f01a53d2f6627d7f16fbcb5c0 *data/hued.rda
+edd2b1de54d57beab99b6df27e80473e *data/huie.rda
+dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
+55fe633c571d9959b381eff64790be4f *data/huse.rda
+ecea04e7d259d48a8f99bfb98f484113 *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
-78e35e9862a43ff31579fe7ed29e1be3 *data/mmt.rda
-0f45f6779a3c3583f4edf68da4045509 *data/nzc.txt.gz
-5b88c319b7cc110298ccccbd20f1f63a *data/nzmarital.rda
+788d027738638ea01bb2be8f176b8151 *data/marital.nz.rda
+da16695dff97279ba7d1e0ddc58baf32 *data/mmt.rda
1017612628ed904e97e5a426d307b16f *data/olympic.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-7da3f7ba49a190a68539c3bb248dc7de *data/pneumo.rda
-a154338a23054326f3afac21214c5dce *data/rainfall.rda
-742019771657a52118284311b2bb8aa1 *data/ruge.rda
-a36796d60ad14f05024e2d84495004fa *data/toxop.rda
-26a13fd8ccb944b5c7ddff43115a4db5 *data/ugss.rda
-bdd6a67f4910e8de546093d063e3bceb *data/uscrime.rda
-e44a36175f75fcbf6a3e7c51a2b5f9ec *data/usgrain.txt.gz
-4c353f323c41f4a1b73be98d5bf52494 *data/venice.rda
-5469e732f315a221cb79fb0091ce6b52 *data/venice90.rda
-728a2da997645be1c6eb332be92e850a *data/waitakere.txt.gz
-7ae69687f10e102feeb9e04fb1d93d29 *data/wffc.indiv.rda
-51e5e4667d5ef260e254944faa9d6540 *data/wffc.nc.rda
-c5f27c15f26deaaa8234b2fa9137e4df *data/wffc.rda
-5bf80f6fd179f687948f6faafabbfb8d *data/wffc.teams.rda
+a7bd35ce047757423918ec3df1d0c64f *data/pneumo.rda
+114f50258909aa549be0682ea76f1cbd *data/rainfall.rda
+363e6a27dc9e81112bd600c7edda300a *data/ruge.rda
+c7834fcfd9a9b7189f1a2dc4f4ad93ed *data/toxop.rda
+dd568cf936049ae6856176a9467c42d0 *data/ugss.rda
+6d4ffc05cebc82f53718240ff8f365c8 *data/venice.rda
+927ac069625c6c27255640d027f27ad1 *data/venice90.rda
+e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
+2270fe985a9b1a1b28c8e2b3b5809126 *data/wffc.indiv.rda
+563d6436ed4ac5a762037855e2d36ff9 *data/wffc.nc.rda
+8cc72d7b3ec762ed0c78306cf8269906 *data/wffc.rda
+82ecda3285b2922b63e2493a20ccdffe *data/wffc.teams.rda
+352e18b15cd0acc11e30d2b742ab0305 *data/xs.nz.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
532aba4ad4cac611141491a5bb886236 *demo/binom2.or.R
a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
@@ -142,17 +144,14 @@ d2c02ccaf4d548cc83b3148e55ff0fa3 *demo/lmsqreg.R
a3d2728927fc5a3090f8f4ae9af19e1a *demo/vgam.R
00eee385e1a5c716a6f37797c3b4bec5 *demo/zipoisson.R
45d6563f929e021db90f9c0289e6093e *inst/CITATION
-466027c0c99178bb6e695d9439fd6829 *inst/doc/categoricalVGAM.Rnw
-32351bc765fab29b9018c69d9dca22b8 *inst/doc/categoricalVGAM.pdf
+51437c0e17cd2de2d3548017336eb8b1 *inst/doc/categoricalVGAM.Rnw
+77fe125caa46c0f8512e84ce87857143 *inst/doc/categoricalVGAM.pdf
e4c5415e487f533b70695b17e40d97bc *inst/doc/categoricalVGAMbib.bib
-92e903f33d4067a7fbc89fa4e7571c92 *inst/doc/jss.bst
-c98d257f9bdb7cca3a1dc6306ede5d77 *inst/doc/jss.cls
ae4c252ab1ff7ea5097b50925524c6c8 *man/AA.Aa.aa.Rd
6e6488fe17bda74157417f38f7d63df1 *man/AB.Ab.aB.ab.Rd
426224676fcf86a274ee40a1e897ff51 *man/AB.Ab.aB.ab2.Rd
-f39858c0426842ecf98e71fce7dae258 *man/ABO.Rd
+4d087454d28e88143204b8ae0a6e94a3 *man/ABO.Rd
e205077baf82273656dade8e39dfd0f0 *man/AICvlm.Rd
-0c05d0d66ab909a5a7916f89004b8316 *man/BratUC.Rd
4c634c4ac3a9673b49e00a21a5edcac0 *man/Coef.Rd
42eae1271b8c7f35a723eec2221a21f2 *man/Coef.qrrvglm-class.Rd
b00890f6b16bb85829fcea8e429045b9 *man/Coef.qrrvglm.Rd
@@ -160,74 +159,66 @@ b00890f6b16bb85829fcea8e429045b9 *man/Coef.qrrvglm.Rd
5bff76cdc1894e593aa8d69a6426b0b3 *man/Coef.rrvglm.Rd
02efc2828e76eac595695059463d1d47 *man/Coef.vlm.Rd
75b836cf0732d7eb2ab70aab73958cf8 *man/CommonVGAMffArguments.Rd
-310947086394afe4710940e6b5f2a20f *man/DagumUC.Rd
4c84f8608e7e5a2a69fbb22198aadf95 *man/DeLury.Rd
-ce9fbf6bdfba2dd4b29264c4b91a9ed2 *man/FiskUC.Rd
2243f6f66449d96a9c370d9cb118bc85 *man/G1G2G3.Rd
-08653494776ff33b9055399d1cfd36c7 *man/Inv.gaussian.Rd
-d603c4299f106fdb46fbea981204fe29 *man/InvlomaxUC.Rd
-a009627453d19f5eaf6ae71f4f0b1050 *man/InvparalogisticUC.Rd
+8594694ec7498eb252846e5e98930532 *man/Inv.gaussian.Rd
40f8887a9e6322c1bea8ce385468c991 *man/Links.Rd
-61d800c277d36cbae33d37f44f3b6678 *man/LomaxUC.Rd
0204cf1e24403cbd66194f76dc3f1040 *man/MNSs.Rd
86a807027a2ed716e89276800c8714be *man/Max.Rd
2e0f16626b262cb24ca839f7313e8fb9 *man/Opt.Rd
-33f08c2321eba55fbfe58b3020c2b28a *man/ParalogisticUC.Rd
a0c448aa48678a37e4fc983bb532d141 *man/Pareto.Rd
-b89db00a67be3a3aaa3095f3174e831d *man/ParetoIVUC.Rd
-75fad694be2767a5375d9be5be022ad7 *man/Qvar.Rd
-eeb74d98864573758cfe36ba13ef6ef1 *man/RayleighUC.Rd
+a11e8355c8a19a851bf46809073b526a *man/Qvar.Rd
0404984840078254ed64e04618bf56ca *man/Rcam.Rd
-f379038af8e484391f0f928a1857b3a1 *man/SinmadUC.Rd
-c81b93652782bea87a01f828300930f7 *man/SurvS4-class.Rd
-aa35103aecf13d0119e5a1a17f5ae57e *man/SurvS4.Rd
+2db32b22773df2628c8dbc168636c9f0 *man/SurvS4-class.Rd
+4f4e89cb6c8d7db676f3e5224d450271 *man/SurvS4.Rd
56b6bf93ed5da4c3e8324758bfde36aa *man/Tol.Rd
-8647246f41c25c8de6e9812dbcdffe9a *man/VGAM-package.Rd
+69e999f635cae6333515c98a09a8b7c0 *man/VGAM-package.Rd
a092ccdd940f1f911845d1e4e7ca8f2e *man/acat.Rd
-d48a96cc10227d71c1f911d4ab2e1f4e *man/alaplace3.Rd
+9d8a50479e0a331261a834e5fe82c65b *man/alaplace3.Rd
0faf4d7fdfb9526dec05f6ff87680b90 *man/alaplaceUC.Rd
fc94162782c395640db18e1ff7c6ebb5 *man/amh.Rd
df8c8413b03b440d0451f50d92321e0f *man/amhUC.Rd
73bb3963d43fd465ff2dd6afdb5473d1 *man/amlbinomial.Rd
bc2496ef5c112b9d663b1fc90a1c493b *man/amlexponential.Rd
-da42748b4b58104ffb2fe8cfad711dab *man/amlnormal.Rd
+dc06ac869a484aa41dd301d11f5372f3 *man/amlnormal.Rd
2c2e41401482c0d156dd568480888925 *man/amlpoisson.Rd
ba175111a99a5dd998a544b32e3390d4 *man/auuc.Rd
37adc3f8e2804c880143a06e475bfd81 *man/backPain.Rd
-ae939e9f4844d1a74c34f1792f4d6624 *man/benfUC.Rd
-fb0dbdc1e60369a7a29fc5d379e8b629 *man/benini.Rd
-48d7a80011c803267396d4ad49424371 *man/beniniUC.Rd
-29172adb77fdc0148c50b6beed8a04dd *man/beta.ab.Rd
-b75d0112b0fd70acb58720daf5dc8e7e *man/betaII.Rd
+34b5510370a46ab522a754c731a437be *man/benfUC.Rd
+103d6afe4d897881692170608c47e7a4 *man/benini.Rd
+b3e26d0011014d3722b4ecb3675c4aea *man/beniniUC.Rd
+73192be7a4732b3e32cdc0edef65010e *man/beta.ab.Rd
+e661e278644730ad0602065afea6c240 *man/betaII.Rd
41820caae54231fdfe4f43c64c8b2aa6 *man/betabinomUC.Rd
1600b3f2a75c6a60546d1d01523b1b98 *man/betabinomial.Rd
-8f5474206d55a6a2d6f770b6fd97d9d8 *man/betabinomial.ab.Rd
+0258e72615475b5afbae20655f7d60f7 *man/betabinomial.ab.Rd
4f1141b7ef59dcfb3c52d96cb41e44df *man/betaff.Rd
da3fdbf88efd6225c08377a461e45c50 *man/betageomUC.Rd
8c75be04378f771189e287d9ec77ee71 *man/betageometric.Rd
f2729cad5024784c73e0d9fa6aaef394 *man/betanormUC.Rd
9065dcf96fd6b05e60189a5d5a5ee551 *man/betaprime.Rd
7adaeed3dae23da1a0cc5eb9358d4597 *man/bilogis4UC.Rd
-a4113bec799465dd87db6c2afacb855e *man/bilogistic4.Rd
-1b1505b19650baa2ce76fb08a24a628f *man/binom2.or.Rd
+992e6e71ae8c5a12ef3664da492829bc *man/bilogistic4.Rd
+c1fe467f3523193935adfd6b8e3ead1a *man/binom2.or.Rd
e4d7d902c5c17c65f48b0eb17cd14cd9 *man/binom2.orUC.Rd
bb62a8e00f036e4c1ffd7b6c24793d78 *man/binom2.rho.Rd
0a679878123b41e3eb8f7ec074c83dd9 *man/binom2.rhoUC.Rd
-abd7367e55b62d594509ac626ba251ec *man/binomialff.Rd
+4863f87dee822d43731cb82da063c443 *man/binomialff.Rd
461ddeea757c9690113126296c2fac55 *man/binormal.Rd
bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
3de6128c31694785566e9212b2f63999 *man/bisa.Rd
903c040af10a99cda997fc5a11402bfa *man/bisaUC.Rd
-4cb6cae438d1f9080373569ab44f3d4a *man/bivgamma.mckay.Rd
-65de692084ada2bcc2f8ad8665033a82 *man/bminz.Rd
+1190d249811d1a2d7dc952f8af02e90a *man/bivgamma.mckay.Rd
+342d3d5c9931bc7327dc44d346c402f6 *man/bmi.nz.Rd
ca0505aeb6143228b5ce142954ed3ba7 *man/borel.tanner.Rd
adc7dfd546ab8430e0806c3b965c4366 *man/bortUC.Rd
-ad622827843632a0af3c9da51effbe6d *man/brat.Rd
-0e4c602335596525d01e46d2613af724 *man/bratt.Rd
+d0f5ac12609fb094d86da4a90af85508 *man/brat.Rd
+2753db368f4c6ac8f145ed1988ff599e *man/bratUC.Rd
+124bbd982a378dca2151fcc854a07dfa *man/bratt.Rd
f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
702754aad58a33aba1594bc7d2d45acf *man/calibrate.Rd
-a29c9de0df75b544fb778651fee7ca12 *man/calibrate.qrrvglm.Rd
-8c7794b8727f1a0b7220b1bf5001a1bb *man/calibrate.qrrvglm.control.Rd
+6cc85adda04a13e2ef01e0da265b67fd *man/calibrate.qrrvglm.Rd
+7bc25736ab5e60ead3c3bb6a34e34aa2 *man/calibrate.qrrvglm.control.Rd
7308576228b41ce02ac3b9f61c8f9f6e *man/cao.Rd
f15b81668cd82879e8f00897fb30eea9 *man/cao.control.Rd
d42538f50f7b5ce49b81b59403485955 *man/cardUC.Rd
@@ -236,24 +227,27 @@ d42538f50f7b5ce49b81b59403485955 *man/cardUC.Rd
e7b9c33bacc1d02d937453ab6ef7234a *man/cauchy.Rd
2ab80616c05e7aebdcf769c35316eab1 *man/ccoef-methods.Rd
8805fcc3975bce184bc92154da60bc6e *man/ccoef.Rd
-d30a5b6b43969dd631c7a45fadc1047e *man/cdf.lmscreg.Rd
+fd0d4488ddb3aa386bf1ed76f759450b *man/cdf.lmscreg.Rd
736c151641c47418c5641e4b50f72326 *man/cennormal1.Rd
-b0c254854e201fd1185b233ff0645ff8 *man/cenpoisson.Rd
+92e4f610ab29c8a3ce3d23e08e5be934 *man/cenpoisson.Rd
f6c605b4eed73b77cd5a3d90098632be *man/cgo.Rd
42cc5374d9f2d1fa077cabf5cb18cea2 *man/cgumbel.Rd
-594d372f4d9072817a9ea3ab4a0f14ad *man/chestnz.Rd
+8b1f242c28ecc87b8f3850ee789a144e *man/chest.nz.Rd
+fc640335c7cd7df304a7396820bd46c0 *man/chinese.nz.Rd
92b1bbec2b9554215c23402cbd03ca04 *man/chisq.Rd
8ecbb478efcf4b0184a994182b5b2b94 *man/clo.Rd
1e216ef8b7c72364a0e8d5d28a190fd2 *man/cloglog.Rd
1aa6ee888bb532eef1f232c9f6a02b5d *man/coalminers.Rd
c34d8e18e49ac22df6e9e9e0d59ca2a1 *man/constraints.Rd
-8d7c0956439111786d43b7b161f9cf5f *man/cqo.Rd
+8d5b5435cea0a91ffdadc459fa8f7905 *man/cqo.Rd
4b6e07b4fe4a71094c99e824f5b3cd91 *man/crashes.Rd
5c964fdf03906470eaddbbf3d39076ef *man/cratio.Rd
-066cfa56e6c030510d3a67e260fab32f *man/cumulative.Rd
-8e9284c16776ee6767344f3e94c40339 *man/dagum.Rd
+6fb9db2b54b6b351d5fa6ee4c1e0334e *man/crime.us.Rd
+1ecf20c4f89f51b6c4a4c0345c910b5c *man/cumulative.Rd
+13da678f2c718e0dc6bbbcd38d06ddf4 *man/dagum.Rd
+69387a098ea4f01d352f9b3faafbd504 *man/dagumUC.Rd
1f1a2e048bcc0061b8aa5f0d7fcb600b *man/dcennormal1.Rd
-cc0da9d5746513c788561a7e0a9ce8ab *man/deplot.lmscreg.Rd
+b2a696abb80c47fa0497c245c180ba13 *man/deplot.lmscreg.Rd
7f57d255543bc7d13dadf322805c99c0 *man/depvar.Rd
40a6d820457d0015ca60fe3a752ca80d *man/dexpbinomial.Rd
1bfcb86a014b0b758f50d132bd885679 *man/dirichlet.Rd
@@ -264,7 +258,7 @@ fe902b6457a11d51c758018d9dad7682 *man/enormUC.Rd
2ad791294f4220bacdd9dc1e07fb2e94 *man/enzyme.Rd
fb32261e27bdbbf3719163d4981742ba *man/erf.Rd
7a52af5919ffbe4f6491df743fd54d28 *man/erlang.Rd
-d8d6acd92c165383419aab1005d77ea4 *man/eunifUC.Rd
+016203ada813723df52817147e7da63a *man/eunifUC.Rd
a755d061d59cc71b7aeb44e7b224976c *man/expexp.Rd
8a3dffebc0871a56f7dc9f9f3bcfd60e *man/expexp1.Rd
f8ea6ce8d6fd230e8dcb593d09b50140 *man/expgeometric.Rd
@@ -273,51 +267,53 @@ bba52379a93d8f2e909b579215811554 *man/expgeometricUC.Rd
0c5cc8525c38f3ffb7bc8f880fe04a7e *man/explogarithmic.Rd
347d45279f0e72bc8c2dab25ace2f28c *man/explogarithmicUC.Rd
5cda1f3c70b2f647037c1ee4302efd63 *man/exponential.Rd
-f73ce79537efaf892331a48daa47927c *man/exppoisson.Rd
+f2c84a09c854f679856eccd4f4430e61 *man/exppoisson.Rd
8e5ff25491af9631e681241ed305bf94 *man/exppoissonUC.Rd
737c92f56c01d46e0219fcba779987fc *man/felix.Rd
842a3ba37b78b88f1e726338dc883d85 *man/felixUC.Rd
e89421f88d21f4867aec746c47b5e804 *man/fff.Rd
-73cf4977cf0b692b1de42c32a343d525 *man/fgm.Rd
-6b1e2a1c3b0ec43e6b396f4f22097e35 *man/fgmUC.Rd
+66f1c7e1e2f78f76ed1b5b7e7fa259bd *man/fgm.Rd
+0c4744ec66aa44b14f5c3dd2d79856a1 *man/fgmUC.Rd
0f91dd411c054004631a677eda63db79 *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
2a71cba3122f180deefc7eac6fd9500f *man/fisherz.Rd
-5d993e74b612f3c22698bec281201cc0 *man/fisk.Rd
-88f722aee35485da126d801c71b8aee1 *man/fittedvlm.Rd
+67865f518cabb0413c9e08042efd55fb *man/fisk.Rd
+8a4d96c331c9bd0f8a630a672f1cc2cd *man/fiskUC.Rd
+f50d6af678d60e23e1285f5d2c6255cc *man/fittedvlm.Rd
f0dd850a571209fb251db51db2b3d9a7 *man/fnormUC.Rd
619e4551f1f29af1cd2e80db5d5eb98c *man/fnormal1.Rd
18c339da4093664d14febbcf02f3a2b6 *man/frank.Rd
cdfcf8fb1eb1799a197dd90a5a245d9c *man/frankUC.Rd
-46f42985d5d6ba06ee337803eb8f6570 *man/frechet.Rd
-f3cca03b14f8a766483fb52c1624514c *man/frechetUC.Rd
+6f7745678b1aeec1b8dddea8db6f83b3 *man/frechet.Rd
+2716982ec8d58016f0d08737aecd8843 *man/frechetUC.Rd
a064b35aec006934e5667bdbbedd1b97 *man/freund61.Rd
bc47c6ee3e74df598d758b5e16abac90 *man/fsqrt.Rd
13cc0e1a0a95d020031deddecb4af563 *man/gamma1.Rd
152972ee5cd8c6d903ea1faba8d2b207 *man/gamma2.Rd
bc93b6e6e71256cee791e31125b0b1e7 *man/gamma2.ab.Rd
cf2ba12145a4e1626df9585d8fc72987 *man/gammahyp.Rd
-e7c54cc18a759b213837ecaa38f9fcda *man/garma.Rd
+66237ca3553faaf444f36b592a1cfc4b *man/garma.Rd
dbdc01466b43ed8302f46b2a63da17bb *man/gaussianff.Rd
-cf30751ebb774891747aa4ece03f25d1 *man/genbetaII.Rd
-e0441f26e110e94aa55d835ea5c1019a *man/gengamma.Rd
+e69868e255358424343cd680205ea6f9 *man/genbetaII.Rd
+988ec82425b040c71e0bfee8dcef00dd *man/gengamma.Rd
bd63e15c3ac9ad8a8213d4cdc8bb3440 *man/gengammaUC.Rd
-af2ae222bb2540c889277c64f3d331b8 *man/genpoisson.Rd
+47fd021736f77a04595d5c12e7ad4842 *man/genpoisson.Rd
f626c2b3188a5755dc93112aa3bcbcf5 *man/genrayleigh.Rd
c31e093e7b6e5a4a7959ba6404b85a23 *man/genrayleighUC.Rd
-b4584d10b2359e4b087c54bc22cafad6 *man/geometric.Rd
+bdd0441747900e5421d0fadaa907ed8f *man/geometric.Rd
78b7d9455f1eaa4572ff54427d77935f *man/get.smart.Rd
14a7e2eca6a27884e1673bd908df11e1 *man/get.smart.prediction.Rd
48676987a2581858d5b2992385d29134 *man/gev.Rd
564d66518a6ec5d2a303e16814266d8c *man/gevUC.Rd
690b69d50e92a781720cc547dd22c3b4 *man/gew.Rd
-ce9bfef334079c13a62f95162513d6ec *man/golf.Rd
-823e37f882dbbe96d43edec25addd2ce *man/gpd.Rd
+b4acd939599553a8f5fe60461c1d1940 *man/golf.Rd
+70f0f28c69b1f390c67fb4bcce125da1 *man/gpd.Rd
05ffba31706bba09ffb7a1d7a18e1a4e *man/gpdUC.Rd
-406afb4b18ca9f3b239f5419f2e6a59c *man/grc.Rd
+d262446f558ffbaba51cc8ff86e5ab1a *man/grain.us.Rd
+34ff9c06370afeb74babd58f0b8726bc *man/grc.Rd
63d054be8dbae4bf35a7b9b6992627e5 *man/gumbel.Rd
-d7c9a4c5322e34a0c0173c89bd1829fa *man/gumbelIbiv.Rd
-65c9eaecf95125e461b5fdf589d01497 *man/gumbelUC.Rd
+fce5cc2b341eb7e67c00f8c0d91ea287 *man/gumbelIbiv.Rd
+c3115a24f1bcd264b17912ed76c8fdb6 *man/gumbelUC.Rd
d60aa16831b87c86aaa5648b6c4afc76 *man/guplot.Rd
00b132289191052ac14659de9ab936fc *man/hspider.Rd
b5224b8a3e3ed7eae77129374e17c95c *man/huber.Rd
@@ -328,16 +324,18 @@ bb9248061e4bcf80a1f239192629dd44 *man/hued.Rd
d44f3df87816b5cf0f1ef98315300456 *man/huie.Rd
3cb4fc1b3a7f1a6bcf7822219ac25525 *man/hunua.Rd
08383189cb05fe01a3c8a5fa2e2c78c5 *man/huse.Rd
-e52bc61525d8bb31785915c0e42cf2f6 *man/hyperg.Rd
+dcd7c3b73c0e9437f777ea65f25f23c3 *man/hyperg.Rd
f134ace4dd0689809500d58933cff6dc *man/hypersecant.Rd
8d18339270dbc32b70c105c3500eb412 *man/hzeta.Rd
-59b384e572f60dce3ea6b74d12455561 *man/hzetaUC.Rd
-d41f639aadc6abd8759824d1e7516083 *man/iam.Rd
+1c82e233c218a874edc3b00547d8ee1b *man/hzetaUC.Rd
+9c03dfc0921099fdae21e7e340ac3cc0 *man/iam.Rd
7266e5dba641098cd882cb62a8e33244 *man/identity.Rd
-070efd951fc6152397cb15634788059f *man/inv.gaussianff.Rd
+7736014b1a24efd32b9f35eda358fe5e *man/inv.gaussianff.Rd
941470d5ff5e3a83089d1ec1af026f35 *man/invbinomial.Rd
-81940e20ea38f0ae4ab6277edcaf42e5 *man/invlomax.Rd
-da26800b94821aa6582a183ac62bf2f1 *man/invparalogistic.Rd
+126b1730f039090da87aaf947455c98b *man/invlomax.Rd
+01cb2a27a9c0eae7d315f3ca158749f5 *man/invlomaxUC.Rd
+25bf1baa9a1a4e7c9074e94572318db6 *man/invparalogistic.Rd
+c0161485e2448b7abdfd3da5ab738c0e *man/invparalogisticUC.Rd
a286dd7874899803d31aa0a72aad64f2 *man/is.smart.Rd
2e3e9b010e6c48ebc38720fe7a1d88fc *man/koenker.Rd
8fe841741b94002d204ba682bde54c8a *man/koenkerUC.Rd
@@ -355,93 +353,100 @@ ed0afe39738f1712b3981c3618c4f913 *man/leipnik.Rd
7fec5c64cf46a14b918a919590025ac6 *man/lino.Rd
2ef824a6f01bef38ed0076a1015fae79 *man/linoUC.Rd
c347f3d3752c3dcf7d9b614b3f62be6f *man/lirat.Rd
-c3dbc9895942144e600b7f7b8537bd56 *man/lms.bcg.Rd
-52d7b11419756062c68465f23adab1ba *man/lms.bcn.Rd
-04c44739582d71d4d6b8ca526c36a104 *man/lms.yjn.Rd
+4ae53304c7e161a7979e2dd08e74fd71 *man/lms.bcg.Rd
+98702304ab240fd2b82ba9a32911903e *man/lms.bcn.Rd
+ace0ac75d6a275e6814174949f40be92 *man/lms.yjn.Rd
ee69ac28aaab7887c656b857af21ffd2 *man/logUC.Rd
c362d03bf3e2c4c24f8e0f46af093a09 *man/logc.Rd
-2207f5080987b6271d61e9bffae6d427 *man/loge.Rd
+f3d3ed74f201143d09a98f938b684c6a *man/loge.Rd
e5c36efa7e692fd32de85fd9c4a347db *man/logff.Rd
-c06a10d369d8e25373a53df73436c812 *man/logistic.Rd
+5b7b7b672758091d20d8ff0f358f2550 *man/logistic.Rd
117884ae7e831a397caedf145a434c28 *man/logit.Rd
1da3783f1662d799690fdd081f721ee0 *man/loglapUC.Rd
-9b7259abd4c44c955e9ebe9e9087f2e1 *man/loglaplace.Rd
+3ffe1e60703b15f818cd7972cd8f44a9 *man/loglaplace.Rd
8232a213dfc8899703f6e57664efae69 *man/loglinb2.Rd
dcbd827fd3586f46fc4ca1a1495a9ea1 *man/loglinb3.Rd
-aea6b2adf7ffd43d5659d238c9729713 *man/loglog.Rd
-c765fec35de8b6822c02d3b41a75a7dc *man/lognormal.Rd
+dd9c84ba9c07cc9414175b41d94fe1f0 *man/loglog.Rd
+ff85df21653d22ed4cbf3138f82049d8 *man/lognormal.Rd
aad78245c7c13be5d22efbff8774adf8 *man/logoff.Rd
-5910ef0e4b1c84786a0a3fffe647aafd *man/lomax.Rd
+448a8a2f7e4dbec62a07f699bceb0650 *man/lomax.Rd
+1fa1bf8d11541be8d48de2ff954462b4 *man/lomaxUC.Rd
138808d36f9fb37444e28e0d2c426dd1 *man/lqnorm.Rd
+f6ce6b9c84be7adf18b37a78ea6622b6 *man/lrtest.Rd
8b21946b3c21a74d758e4b18117c0000 *man/lv.Rd
528f457d3ec33f6264ccf05670fac457 *man/lvplot.Rd
af30767e3ab7bfb0bc809409d7f39e84 *man/lvplot.qrrvglm.Rd
15d57ef2c0a922cef23f2d25cda5c3cc *man/lvplot.rrvglm.Rd
-8ddc307ce88b445463ca45bd66e1c967 *man/margeff.Rd
-15d5967068a26cbb6072ee9ded149076 *man/maxwell.Rd
-d57adc38eda1d2e55e38796d24aa38a9 *man/maxwellUC.Rd
-6605f4601c5ef2733771cba7a061556f *man/mbinomial.Rd
-cc1c4f080d0a30059d52ed93b9a3090c *man/mccullagh89.Rd
+49c02a1e6bf68c88e2357f717d929ba5 *man/margeff.Rd
+b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
+f08033557088369199e94547b1740580 *man/maxwell.Rd
+3fa2c9ebae9651becc102930b49d03ca *man/maxwellUC.Rd
+10df4196cca726f8787c0c5f5656e3d0 *man/mbinomial.Rd
+7691a2cfdeb641439b0cb86959d6632f *man/mccullagh89.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
3660487df3e8da3023fa94195c717e06 *man/micmen.Rd
-aef1a3568d58fb50c01c0be1ebd44720 *man/mix2exp.Rd
-56639277cd56ffbb1840473640e5e95b *man/mix2normal1.Rd
+9a192c889be24f7bdd6176f9aca6744a *man/mix2exp.Rd
+032b58b8746fb0d18ed355acd28afa7f *man/mix2normal1.Rd
4aaae69710cd08f08bb7ce432cf2108d *man/mix2poisson.Rd
1d7e090a54f5524e6fe0711bb942be47 *man/model.framevlm.Rd
0db10fd10f2e69997d9fe9242aea3c7d *man/model.matrixvlm.Rd
febba2e46a2084aff84e8c76a388e400 *man/moffset.Rd
-e4bf75856828a8a128c9a95a663f2b6a *man/morgenstern.Rd
-b820e9ada80b37d5ab7ec410da992489 *man/multinomial.Rd
+dde2999ddb57cc4af821b2d2e2b65251 *man/morgenstern.Rd
+07fe33dc7b8afa5a25c52d7030788266 *man/multinomial.Rd
29ce3642cdb940b4bdbba7f6173a6a60 *man/nakagami.Rd
d87f98ccf030b9925fa27475890cd27e *man/nakagamiUC.Rd
-b22f922cf137d6ea4280bffc9551c733 *man/nbolf.Rd
-01598cf15d4f6301379a48aab34f99bc *man/negbinomial.Rd
-23eab49179dff394f6f7ea0e84d3bfa2 *man/normal1.Rd
-b1f3913fd20911c2e808c76f0ec3debd *man/notdocumentedyet.Rd
-a300b58d732cd75ac5879a434d7ee8d8 *man/nzc.Rd
-f284459968787cf71ed152683e1acd7d *man/nzmarital.Rd
-129d6f1bea94c96636e58707916d28c7 *man/olympic.Rd
-fd8cdb48779a490071c91054c8b5ba33 *man/ordpoisson.Rd
+38c45f8d05c910a957456dcb22c2cd4f *man/nbcanlink.Rd
+7a211d0cb765afa12ae6579af7d867d5 *man/nbolf.Rd
+285532c1c7ad5b17bc7ad287bef549d8 *man/negbinomial.Rd
+4511975c94fcfbe834ba7ca3e457c98d *man/negbinomial.size.Rd
+4c8b84458e8ee97cf8ec3189da73a78d *man/normal1.Rd
+b1b213e6113e6896acd7e8b2acaba125 *man/notdocumentedyet.Rd
+8a118515f4955e425adcd83f7da456ec *man/olympic.Rd
+1ca5bd6a9ee667125ba379e48e66c99e *man/ordpoisson.Rd
9ecbe9ab6cc7d40f41f10a71fdae5996 *man/oxtemp.Rd
-e1156bbc8f962d2849c56028e2430151 *man/paralogistic.Rd
-721fd4000631dec85c2f7164bfaf2e3e *man/pareto1.Rd
-f9511e9a7a3bb97e4ca6c6ed080d40ad *man/paretoIV.Rd
+af3da753da81709490046832ea545701 *man/paralogistic.Rd
+e82353ff6171e11bbeae4e3687bca231 *man/paralogisticUC.Rd
+97dc353975a803fd33bebd083c85713d *man/pareto1.Rd
+3c9ba189fa4f71114f3aa7248c169951 *man/paretoIV.Rd
+b89db00a67be3a3aaa3095f3174e831d *man/paretoIVUC.Rd
66f9463188664956ca69d58bd11a0e51 *man/persp.qrrvglm.Rd
53a43e65f00420564ad783888f356ff7 *man/plackUC.Rd
c542d660e94860e165d2945a855eae24 *man/plackett.Rd
-147b571a4d86267b1e5a6c1ad84ee5b0 *man/plotdeplot.lmscreg.Rd
+49808aa704ee72fb230c99b656d48d0b *man/plotdeplot.lmscreg.Rd
768d300d2a478398c5a77549922caa97 *man/plotqrrvglm.Rd
-e5609405022b247366503d18f15c6df6 *man/plotqtplot.lmscreg.Rd
+9653f109e0c0c5191306070e0f2b8ac9 *man/plotqtplot.lmscreg.Rd
9ae405fd77c85cab2a55f92664b1cc67 *man/plotrcam0.Rd
-96080febe9dd3644878ba14042913990 *man/plotvgam.Rd
-fb44d2ea3a4da3e63336746e7ae5affd *man/plotvgam.control.Rd
+db9c5b2ca7fd4417d4d88d02317acebb *man/plotvgam.Rd
+72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
aa55e676b3fd0fab0f1aee26ab9fa6de *man/pneumo.Rd
de61bd1899e2bd101d3977d2e25f163f *man/poissonff.Rd
aea0d6dabf75a88fc5bbf4cf77fef7ec *man/poissonp.Rd
-270ec562dcf4fd2a48221453d110c3eb *man/polf.Rd
+8abbf4f53f755542e7197830d026f514 *man/polf.Rd
a2fb4efb4037aaa2362579d73e78defa *man/polonoUC.Rd
0d1147afa73af7ae0860f36d2d7732f3 *man/posbinomUC.Rd
67c1153ac99b572401e73d68f665b2ab *man/posbinomial.Rd
+e73cb6828656ad718bbef4a46548e2ee *man/posgeomUC.Rd
76788187cad5a084cb5ad798b934bd07 *man/posnegbinUC.Rd
-83ad8c3435e907ecaa98615c6f1e18ad *man/posnegbinomial.Rd
+ccfe5f42d992cf7aa5f5309dade4aaf5 *man/posnegbinomial.Rd
0e2ea2f46537b34ccc6603fe56303983 *man/posnormUC.Rd
c4f9abd34a4cd9ea5b8a6fc3b88abd83 *man/posnormal1.Rd
-e7baad18a3f2c0df245687ff2e74413b *man/pospoisUC.Rd
-0f588e778a0c38a98841da257cecd4eb *man/pospoisson.Rd
+fdc592c22ffaf895ff085a9010bf7a17 *man/pospoisUC.Rd
+6cde192a6dbad131523057890c565ab2 *man/pospoisson.Rd
95386d432e396127192e5516a35059cd *man/powl.Rd
f5ca83cbbe57ce6a7e98a0318ddc6aac *man/predictqrrvglm.Rd
10003ea86273bd156fdbd6990c5f80d5 *man/predictvglm.Rd
d2b5e03b84a6c8b6ba9553155445c694 *man/prentice74.Rd
b0913ae27b2f0ab1e032bf3398a78d5a *man/probit.Rd
-1c6818a129a544e770b0dc1d9e6c0ee7 *man/propodds.Rd
+f400ce682e069e8cdacf3f6bbe187d69 *man/propodds.Rd
dc7a643eba4c2ac7bbd842ed27eb1023 *man/prplot.Rd
de570e252375d7052edaa7fb175f67eb *man/put.smart.Rd
-b3d7003f58379c80407662707543ca80 *man/qrrvglm.control.Rd
+602637ecc0fab44f08f45caab838f1fb *man/qrrvglm.control.Rd
e5ac6fc23dfa77497bbfe05831e5ea33 *man/qtplot.gumbel.Rd
-7fba3114d68b669c490cb599000f4824 *man/qtplot.lmscreg.Rd
+0636a2c78899c1eea2111afcb48617d9 *man/qtplot.lmscreg.Rd
64dceb3461595b09595b483f72ac8b42 *man/quasibinomialff.Rd
85d05c50101b02eb35a1e31d75226c05 *man/quasipoissonff.Rd
013fb5594d2df84c9fc9aad2dd822070 *man/rayleigh.Rd
+eeb74d98864573758cfe36ba13ef6ef1 *man/rayleighUC.Rd
03fb6a7f9cfc570ad5fd1bc59accc905 *man/rcqo.Rd
215e0a6f6611334b2b9ed8a35595227b *man/rdiric.Rd
ac9770dd82570248526fcc6fc5736e9a *man/recexp1.Rd
@@ -452,9 +457,9 @@ c4d52486cd29a6b05426ece0496dbf0c *man/riceUC.Rd
a80124978dea921b2b0f8f5ac7187bf2 *man/riceff.Rd
211f962003276a0a032c94b847bfc426 *man/rig.Rd
28a7ee11dedcd60712d830cc36f8c208 *man/rlplot.egev.Rd
-577b7fa7b66f8afb57c0b0df0283eeef *man/rrar.Rd
+fdf98b1b6024d9702c1ad361d87169fa *man/rrar.Rd
ed93c6e06d519ab3ddb92c73cf62bb67 *man/rrvglm-class.Rd
-2fa5b2b99f0383911d84ce0d6db9a7d1 *man/rrvglm.Rd
+6a69f5dc095de3eb11b473db1f52d481 *man/rrvglm.Rd
a5a699bccdf3768b9bc425b410d4328a *man/rrvglm.control.Rd
aacdffc764ae399ea515751128ff32fb *man/rrvglm.optim.control.Rd
b5936590eb374807b15d3d6f10257496 *man/ruge.Rd
@@ -463,7 +468,8 @@ b5936590eb374807b15d3d6f10257496 *man/ruge.Rd
71367fe3b494a45c98f9a96e1fd791e0 *man/setup.smart.Rd
fa349f195a44efe47ba19726c6d96725 *man/simplex.Rd
0b224135695156ba53178b78ba64690d *man/simplexUC.Rd
-61a0378592df48624dcd63630d6eddeb *man/sinmad.Rd
+d214ee1c6de6c4c8150be022d0b37c3a *man/sinmad.Rd
+d406cb5ce0d23612220d9011346b96e0 *man/sinmadUC.Rd
6e0c8526ef9dc5b8088eacec6d611448 *man/skellam.Rd
3b158a36468b4e9cb6ac33c6ecb7e59a *man/skellamUC.Rd
878eb152f75438a8c6d55ae6f56f938e *man/skewnormal1.Rd
@@ -479,77 +485,80 @@ bd869816cc0a7a1af02285c8ff7b6fbc *man/snormUC.Rd
da0473cfe60820a64e74d4e2d7492927 *man/tikuvUC.Rd
f11402d98706790ede99940cb03aaccd *man/tobit.Rd
dec960a58993b1941f7f0507673a951b *man/tobitUC.Rd
-deefc3b0d1473924687acdb454b77c01 *man/toxop.Rd
-72d33bd9b49016adbe4b08d070ed2c1b *man/tparetoUC.Rd
+7b79a4a3bbe4fcd9fa6ecfa66fa98ec8 *man/toxop.Rd
+1e9fb945744309465b729dceaf2b9e47 *man/tparetoUC.Rd
d656850a7fba6056bfcaf07a00510110 *man/triangle.Rd
8c327c816d9d56403d617a32fa704e9d *man/triangleUC.Rd
8fb0fbd98a56b1afced6cdceabea5c34 *man/trplot.Rd
-7f445b5722387e9458fd3ae83d54c852 *man/trplot.qrrvglm.Rd
-e589c53551f1b4939087e30738b4a52d *man/ugss.Rd
-165ffd320bf19a0d01fb88fd08405f5c *man/undocumented-methods.Rd
-1d2b4be48c8c99493c17f02fc0de7d04 *man/uqo.Rd
-6ee6dce1a0ce56b637caba0e80cb668b *man/uqo.control.Rd
-7e75f1629f7611bba758f8aa0dfc0894 *man/uscrime.Rd
-795c20aae2ad322727b8054ec92c7962 *man/usgrain.Rd
+5cab3d39bc52ba50848cdfcf64199d4c *man/trplot.qrrvglm.Rd
+1fc91e082e70251f46af4261f7d48f78 *man/ugss.Rd
+fc7e9713ea6a1c4216edd99f11bb8eb9 *man/undocumented-methods.Rd
+8d8835dd870d94aafa3259ecd2568337 *man/uqo.Rd
+f9eeeaeacdb82471c5230468b61d7bdd *man/uqo.control.Rd
986f3ae218b563bae795b67131082609 *man/venice.Rd
-6e4d3e5ae033e23124555b0a90337a5d *man/vgam-class.Rd
-fa381a9ae340781ff17f568787e1a6c7 *man/vgam.Rd
-2b53772dc12e8c7eee53122cf264c76b *man/vgam.control.Rd
+609b06037613c666ba82ef99fe67b97f *man/vgam-class.Rd
+6b001b0875c0a2b48f0bb61c683acdcf *man/vgam.Rd
+1d53ebf6fecfac1f339841ef9b3e8dac *man/vgam.control.Rd
b2bdeb9d2a6e9c2e7b8964d334b4378e *man/vglm-class.Rd
-82d8478a4295bbac3da6402c06101fc7 *man/vglm.Rd
-71ba57518236ae3d52e7a13c4539f808 *man/vglm.control.Rd
+4712b2fd1c5052f11b9c4fe7a02d8dbf *man/vglm.Rd
+d942f0381dec1582da7f219f4f4dfeca *man/vglm.control.Rd
d7e7f317461e888a57ee1082db178328 *man/vglmff-class.Rd
-f3904d70f23d42160da9afb7fc66b86e *man/vonmises.Rd
+e12f38d6fc651548bc7badbbee4b6d49 *man/vonmises.Rd
060df7afe140d1ef3b498e1492a9c1bb *man/vsmooth.spline.Rd
969885cabc2f70c78def5cef9621a648 *man/waitakere.Rd
0a974f438d1c92859d87f28896768b29 *man/wald.Rd
-9370cf7a77f1112b1f0080ccdc9d1bf3 *man/weibull.Rd
-57ad5859e60dda64aaf0420cf457a8c6 *man/weightsvglm.Rd
+8b94fe25920b5a05d4030b30f679176a *man/weibull.Rd
+9e552190553e5c08cc22b518d808fb9e *man/weightsvglm.Rd
f8652276dedb724f7baf7234f37ad2cc *man/wffc.P2star.Rd
f188fe990a99ec6a88e15e3ae69f1b01 *man/wffc.Rd
ae1ea0d10cfc8cbdee70a460c590c823 *man/wffc.indiv.Rd
ce03a749bcb5428662ac78b85bd6f08d *man/wffc.nc.Rd
664d89e742974a4be71a459a68bbfc80 *man/wffc.teams.Rd
655258cff21a67e1549b204ff3d451a5 *man/wrapup.smart.Rd
+5c74881dfc6fd864449dfd0d8c720386 *man/xs.nz.Rd
18bd4b883004bccce4c1d1c5d80bff98 *man/yeo.johnson.Rd
-5db544b3c6404442bab4998b60fa6d64 *man/yip88.Rd
+e397c38e07fedf212775293198657da3 *man/yip88.Rd
8e94dc10a59629c0f9147f940a371a84 *man/yulesimon.Rd
1475d89bd0a33754d7f91bafdd340299 *man/yulesimonUC.Rd
-f71996b89b9a762a5d206b8e44c8cf62 *man/zanegbinUC.Rd
-24e0f645feccbe9bbb98ffb409365ca7 *man/zanegbinomial.Rd
-be7b555df408a51019b24cff72871c4b *man/zapoisUC.Rd
-627fb3b2893f16fb52836f90f8712f93 *man/zapoisson.Rd
-7d566ab0865ba24b4b4b0c418c15890e *man/zero.Rd
+f64c6703e51cc24766ce5dc033b0ac3e *man/zabinomUC.Rd
+7f8fef37516d696a7b685f570c6cb202 *man/zabinomial.Rd
+a7788666a974919ff5b10692bc08a38b *man/zageomUC.Rd
+ffb759533fb11daa037a82826284c9d1 *man/zageometric.Rd
+acb519fd6da2d0bb67539f963310618a *man/zanegbinUC.Rd
+14f25ecee890bda5089e1b21158ee374 *man/zanegbinomial.Rd
+9de32f6cc8bc406ecdfa00d343b796e6 *man/zapoisUC.Rd
+a2d4334c39fb98b5612df57a414c7bd1 *man/zapoisson.Rd
+109b41d0929fdd2fea23bfa1ed23207d *man/zero.Rd
4e19a9181d3ce167b113abb5712489bb *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
41b60aab45c01599e769a721da58ea86 *man/zetaff.Rd
-2afab46d583941a8be8f28ce90195d15 *man/zibinomUC.Rd
-fb0f01a8b40d4730ff0412f8fb198877 *man/zibinomial.Rd
-27dc243f70b76c931785951815025572 *man/zigeomUC.Rd
-0853776b9cfac2d08d028b483a3b3454 *man/zigeometric.Rd
-7161b2bf6ae08370e59dcb38157d1ae4 *man/zinegbinUC.Rd
-3b1d5fe4b2db4e320c726128c046d5ff *man/zinegbinomial.Rd
+e5afe0b17fcaa9b76a65041923bd16d2 *man/zibinomUC.Rd
+01f756bb5ae0f72629faaf2035539e70 *man/zibinomial.Rd
+ac50e58f22d511a8b288f3a3f84bfb5f *man/zigeomUC.Rd
+e407a1f99753be923e2f1a1c512aa72d *man/zigeometric.Rd
+2410e68bca42fa95ee6d2347025bf21c *man/zinegbinUC.Rd
+2aa7fce4177b3599057a728d77c94f58 *man/zinegbinomial.Rd
8548bc081e80aa464b3a4ffbf0a043f7 *man/zipebcom.Rd
-b181bd431ec865366e2db643fbc8b8f4 *man/zipf.Rd
+fe5ca22b6582340e5d6f4542c99446ae *man/zipf.Rd
84b96ae71fbc091562e27a5997446aa5 *man/zipfUC.Rd
-101696ce832be47b90fa13da220201d3 *man/zipoisUC.Rd
-9695a2ec8214a7196d2676cf48765859 *man/zipoisson.Rd
-ad08497921fd341e2853254f9528bff4 *src/caqo3.c
+7f91486b2e334088be2b61ec5ba187f6 *man/zipoisUC.Rd
+6714335e60bbb877ba24d424d186c8ba *man/zipoisson.Rd
+4aaf5efcfbcf1bdf32b13f632ac3ed0f *src/caqo3.c
69d2fd2a25229e368e8cf93ed005f14f *src/fgam.f
-ee68626f9fa7da088c9401345c937dd3 *src/gautr.c
+f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
dc1ca5b4e9a67b6d48c25e7107112d9c *src/lerchphi.c
-f15cb40ff05ab0763a62af4756b711f7 *src/lms.f
-fd2be53f9058e9eaf6fe9271f9fcf440 *src/muxr.c
+9dd33afbac4653b7d8bdbd2794b9c262 *src/lms.f
+9cfd5e51c2dba024afc28b0fffaece4a *src/muxr.c
6f2d68edb270dca177d290a0d62992fd *src/rgam.f
-97981ee3cd3bd77d69e3bacf9ecae231 *src/rgam3.c
+749d84c8e3b17645ddcdb85a77dc9acc *src/rgam3.c
10939d9fb380d54da716a835d37fdf75 *src/tyeepolygamma3.c
5d14c85e6eda8c2d1a3219a2aa3c708a *src/vcall2.f
3e145d8721d17dbd0e642508c2de1472 *src/veigen.f
91fd839e31da38b116c09ef24a3c25d0 *src/vgam.f
-32ba54d93eea2e514468d0c08431b185 *src/vgam3.c
+456d597ae327bc181a1405e3809f7665 *src/vgam3.c
bccf9d58334e1fde641a6d59443cd915 *src/vlinpack1.f
fe604895e0e9c3314f9d29378d1d0ed1 *src/vlinpack2.f
a7625ebca9616112544d1e8155a5922a *src/vlinpack3.f
93bbb9483bd82b692febd98c0cf10a6d *src/vmux.f
-b1a1e8d940090e51d25fa9c08fdc6a08 *src/vmux3.c
+0317d171d3fa308b4e19e2c386341945 *src/vmux3.c
d5c3783cc318a8e1c0b7aafcf5849dee *src/zeta3.c
diff --git a/NAMESPACE b/NAMESPACE
index 53d8b4a..fedd5c1 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -1,18 +1,28 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
-
useDynLib(VGAM)
+importMethodsFrom("stats4")
+
+
+
+importFrom(stats4, AIC, coef, summary, plot, logLik, vcov)
+exportMethods(AIC, coef, summary, plot, logLik, vcov)
+
+
+export(VGAMenv)
+export(lrtest, lrtest_vglm)
+export(update_default, update_formula)
export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.cao, nvar.rcam)
@@ -20,6 +30,10 @@ export( nobs.vlm)
+export(plota21)
+
+
+
export(confint_rrnb, confint_nb1)
export(vcovrrvglm)
@@ -28,6 +42,10 @@ export(huggins91, dhuggins91, rhuggins91)
+export(is.empty.list)
+
+
+
@@ -57,8 +75,8 @@ explogarithmic, dexplog, pexplog, qexplog, rexplog)
export(Rcam, plotrcam0,
rcam, summaryrcam)
export(moffset)
-export(plotqvar,Qvar)
-export(depvar,depvar.vlm)
+export(plotqvar, Qvar)
+export(depvar, depvar.vlm)
@@ -79,7 +97,6 @@ dimm)
-
export(is.smart, smart.mode.is, wrapup.smart, setup.smart, my1, my2)
export(
smart.expression,
@@ -97,6 +114,7 @@ export( bs, ns, scale.default, poly )
+
export(iam,
fill, fill1, fill2, fill3,
abbott,
@@ -130,6 +148,8 @@ inv.gaussianff, dinv.gaussian, pinv.gaussian, rinv.gaussian, wald,
expexp1, expexp)
+
+
export(A1A2A3, a2m, AAaa.nohw,
AICvlm, AICvgam, AICrrvglm,
AICqrrvglm, # AICvglm,
@@ -142,21 +162,23 @@ dexpbinomial,
dbetabinom, pbetabinom, rbetabinom, dbetabinom.ab, pbetabinom.ab, rbetabinom.ab,
biplot.qrrvglm,
dbort, rbort, borel.tanner,
-cauchy, cauchy1, ccoef.cao, ccoef.Coef.cao,
-ccoef.Coef.qrrvglm, ccoef.qrrvglm, cdf, cdf.lms.bcg, cdf.lms.bcn,
+cauchy, cauchy1,
+ccoef.cao, ccoef.Coef.cao, ccoef.Coef.qrrvglm, ccoef.qrrvglm,
+cdf, cdf.lms.bcg, cdf.lms.bcn,
cdf.lms.yjn, cdf.vglm,
-Coef.cao, Coefficients, coefqrrvglm,
+Coef.cao, Coefficients,
+coefqrrvglm,
coefvlm,
-coefvsmooth.spline, coefvsmooth.spline.fit, constraints,
-constraints.vlm,
+coefvsmooth.spline, coefvsmooth.spline.fit,
+constraints, constraints.vlm,
deplot, deplot.default, deplot.lms.bcg, deplot.lms.bcn,
-deplot.lms.yjn, deplot.lms.yjn2,
-deplot.vglm,
+deplot.lms.yjn, deplot.lms.yjn2, deplot.vglm,
deviance.uqo, deviance.vglm, deviance.vlm, df.residual.vlm,
dirmultinomial, dirmul.old,
dnorm2,
dtheta.deta)
+
export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logit,
logoff,nreciprocal,
probit,reciprocal,rhobit,
@@ -164,9 +186,10 @@ export(cloglog,cauchit,elogit,explink,fisherz,logc,loge,logit,
export(ordpoisson, poissonp)
+
+
export(m2adefault,
erlang,
-family.vglm,
dfelix, felix,
fitted.values.uqo, fittedvlm, fittedvsmooth.spline, fsqrt,
formulavlm, formulaNA.VGAM,
@@ -195,8 +218,10 @@ namesof,
nlminbcontrol, nloge,
Opt.Coef.qrrvglm, Opt.qrrvglm, persp.cao)
+
export( micmen )
+
export( plot.cao,
plotpreplotvgam,
plotvglm, plotvlm,
@@ -206,6 +231,17 @@ predictors, predictors.vglm,
predictqrrvglm, predict.rrvglm, predict.uqo, predict.vgam,
predictvglm, predict.vlm, predictvsmooth.spline,
predictvsmooth.spline.fit,
+ show.Coef.cao,
+ show.Coef.qrrvglm, show.Coef.rrvglm, show.rrvglm,
+ show.summary.cao, show.summary.qrrvglm,
+ show.summary.rrvglm, show.summary.uqo,
+ show.summary.vgam,
+ show.summary.vglm,
+ show.summary.vlm, show.uqo,
+ show.vanova,
+show.vgam, show.vglm, show.vlm,
+ show.vglmff,
+show.vsmooth.spline,
process.binomial2.data.vgam, process.categorical.data.vgam,
negzero.expression,
qtplot,
@@ -218,18 +254,22 @@ rlplot.vextremes, rlplot.vglm,
rlplot, rlplot.vglm, rrar.control,
rrvglm.control.Gaussian)
+
export(
-SurvS4, is.SurvS4, as.character.SurvS4, print.SurvS4,
+SurvS4, is.SurvS4, as.character.SurvS4,
+show.SurvS4,
simple.exponential, simple.poisson,
mbinomial,
seq2binomial, size.binomial,
stdze1, stdze2,
summary.cao, summary.grc,
summary.qrrvglm,
-summary.rrvglm, summary.uqo, summaryvgam,
-summaryvglm, summaryvlm, s.vam, terms.vlm,
+summary.rrvglm, summary.uqo,
+summaryvgam, summaryvglm, summaryvlm,
+s.vam, terms.vlm,
theta2eta, Tol.Coef.qrrvglm, Tol.Coef.uqo, Tol.qrrvglm, Tol.uqo,
triangle, dtriangle, ptriangle, qtriangle, rtriangle,
+ vcovvlm,
vglm.fit,
vglm.garma.control, vglm.multinomial.control,
vglm.multinomial.deviance.control, vglm.vcategorical.control,
@@ -241,44 +281,42 @@ zeta, zetaff,
dzeta)
+
+
export(lm2vlm.model.matrix)
+
+
importFrom("stats", model.matrix)
importFrom("stats", model.frame)
importFrom("stats", terms)
- importFrom("stats", "coef")
- importFrom("stats", "coefficients")
- importFrom("stats", "logLik")
- importFrom("graphics", "plot")
- importFrom("stats", "vcov")
- importFrom("stats", "AIC")
+ importFrom("stats", resid)
+ importFrom("stats", residuals)
+ importFrom("stats", fitted)
+ importFrom("stats", predict)
+ importFrom("stats", df.residual)
- importFrom("stats", "resid")
- importFrom("stats", "residuals")
- importFrom("stats", "fitted")
- importFrom("stats", "predict")
- importFrom("stats", "df.residual")
+ importFrom("stats", deviance)
+ importFrom("stats", fitted.values)
+ importFrom("stats", effects)
+ importFrom("stats", weights)
- importFrom("stats", "deviance")
- importFrom("stats", "fitted.values")
- importFrom("stats", "effects")
- importFrom("stats", "weights")
- importFrom("stats", "formula")
- importFrom("stats", "case.names")
- importFrom("stats", "variable.names")
+ importFrom("stats", formula)
+ importFrom("stats", case.names)
+ importFrom("stats", variable.names)
importFrom("stats", dchisq, pchisq,
@@ -287,7 +325,9 @@ export(lm2vlm.model.matrix)
dpois, ppois, qpois, rpois,
dnorm, pnorm, qnorm, rnorm)
- importFrom("graphics", "persp")
+
+ importFrom("graphics", persp)
+
@@ -326,6 +366,8 @@ export(fff, fff.control,
vonmises)
+
+
export(
AA.Aa.aa, AB.Ab.aB.ab2, AB.Ab.aB.ab, ABO, acat,
beta.ab, betaff,
@@ -339,14 +381,17 @@ binom2.rho, dbinom2.rho, rbinom2.rho, binom2.Rho,
binomialff, biplot.rrvglm, brat,
bratt, Brat, calibrate.qrrvglm.control, calibrate.qrrvglm,
calibrate, cao.control,
-cao, ccoef, cdf.lmscreg, cgo, chisq, clo,
-Coef.qrrvglm, Coef, Coef.rrvglm, Coef.vlm,
+cao,
+cdf.lmscreg, cgo, chisq, clo,
+ccoef,
+Coef, Coef.qrrvglm, Coef.rrvglm, Coef.vlm,
predictqrrvglm,
cratio, cumulative, propodds, prplot, prplot.control)
export(
deplot.lmscreg, dirichlet,
exponential, G1G2G3)
+
export(
lgammaff, lgamma3ff)
export(
@@ -373,6 +418,7 @@ loglog, lognormal3, lvplot.qrrvglm,
lvplot, lvplot.rrvglm, lv, Max, MNSs,
dmultinomial, multinomial, margeff)
+
export(
huber, huber1, dhuber, edhuber, phuber, qhuber, rhuber)
@@ -391,7 +437,7 @@ deexp, peexp, qeexp, reexp)
export(
meplot, meplot.default, meplot.vlm,
guplot, guplot.default, guplot.vlm,
-negbinomial, polya, normal1,
+negbinomial, negbinomial.size, polya, normal1, nbcanlink,
tobit, dtobit, ptobit, qtobit, rtobit,
Opt,
perspqrrvglm, plotdeplot.lmscreg, plotqrrvglm, plotqtplot.lmscreg,
@@ -399,6 +445,7 @@ plotvgam.control, plotvgam,
cenpoisson,
poissonff,
dposbinom, pposbinom, qposbinom, rposbinom, posbinomial,
+dposgeom, pposgeom, qposgeom, rposgeom, # posgeometric,
dposnegbin, pposnegbin, qposnegbin, rposnegbin, posnegbinomial,
dposnorm, pposnorm, qposnorm, rposnorm, posnormal1,
dpospois, ppospois, qpospois, rpospois, pospoisson,
@@ -406,6 +453,7 @@ qtplot.lmscreg, quasibinomialff, quasipoissonff, rdiric, rig,
rrar, rrvglm.control,
rrvglm.optim.control)
+
export(eta2theta,
rrvglm,
simplex, dsimplex, rsimplex,
@@ -421,10 +469,12 @@ vgam.control, vgam, vglm.control, vglm,
vsmooth.spline,
weibull, yip88,
dzanegbin, pzanegbin, qzanegbin, rzanegbin, zanegbinomial,
+dzabinom, pzabinom, qzabinom, rzabinom, zabinomial,
dzapois, pzapois, qzapois, rzapois, zapoisson,
dzibinom, pzibinom, qzibinom, rzibinom, zibinomial,
dzinegbin, pzinegbin, qzinegbin, rzinegbin, zinegbinomial,
dzigeom, pzigeom, qzigeom, rzigeom, zigeometric,
+dzageom, pzageom, qzageom, rzageom, zageometric,
dzipois, pzipois, qzipois, rzipois,
zipoisson, zipoissonff,
mix2exp, mix2normal1, mix2poisson,
@@ -440,44 +490,56 @@ export(DeLury,
)
-exportClasses("vglmff", "vlm", "vglm", "vgam",
-"rrvglm", "qrrvglm", "grc", "rcam",
-"vlmsmall", "uqo", "cao",
-"summary.vgam", "summary.vglm","summary.vlm",
- "summary.qrrvglm",
-"summary.cao", "summary.rrvglm",
-"Coef.rrvglm", "Coef.uqo", "Coef.qrrvglm", "Coef.cao",
-"vcov.qrrvglm",
-"vsmooth.spline.fit", "vsmooth.spline")
-
-exportClasses("SurvS4")
-
-
-
-exportMethods(
-"coef", "Coef", "coefficients",
-"constraints",
-"effects",
-"predict", "fitted", "fitted.values",
-"print",
-"resid",
-"residuals", "show",
-"summary",
-"terms",
-"model.frame",
-"model.matrix",
-"AIC",
-"deviance", "logLik", "vcov",
-"calibrate", "cdf", "ccoef", "df.residual",
-"lv", "Max", "Opt", "Tol",
-"biplot", "deplot", "lvplot", "qtplot", "rlplot", "meplot",
-"plot", "trplot", "vplot",
-"formula", "case.names", "variable.names",
-"nobs", # "nvar",
-"weights",
-"persp")
+exportClasses(vglmff, vlm, vglm, vgam,
+rrvglm, qrrvglm, grc, rcam,
+vlmsmall, uqo, cao,
+summary.vgam, summary.vglm, summary.vlm,
+summary.qrrvglm,
+summary.cao, summary.rrvglm,
+Coef.rrvglm, Coef.uqo, Coef.qrrvglm, Coef.cao,
+vcov.qrrvglm,
+vsmooth.spline.fit, vsmooth.spline)
+
+
+exportClasses(SurvS4)
+
+
+
+
+
+
+
+
+ exportMethods(
+Coef, coefficients,
+constraints,
+effects,
+predict, fitted, fitted.values,
+resid,
+residuals, show,
+terms,
+model.frame,
+model.matrix,
+summary,
+coef,
+AIC,
+plot,
+logLik,
+vcov,
+deviance,
+calibrate, cdf, ccoef, df.residual,
+lv, Max, Opt, Tol,
+biplot, deplot, lvplot, qtplot, rlplot, meplot,
+trplot, vplot,
+formula, case.names, variable.names,
+weights,
+persp)
+
+
+ exportMethods(AIC, coef, summary, plot, logLik, vcov)
+
diff --git a/NEWS b/NEWS
index 7b095b0..61ea009 100755
--- a/NEWS
+++ b/NEWS
@@ -6,6 +6,83 @@
+ CHANGES IN VGAM VERSION 0.8-6
+
+NEW FEATURES
+
+ o Modified VGAM family functions:
+ sinmad()@initialize has been improved.
+
+
+BUG FIXES and CHANGES
+
+ o VGAM now depends on R >= 2.14.0.
+ o Trying to eliminate some residual errors with the NAMESPACE.
+
+
+
+ CHANGES IN VGAM VERSION 0.8-5
+
+NEW FEATURES
+
+ o New VGAM family functions:
+ negbinomial.size(), zabinomial(dpqr),
+ zageometric(dpqr), [dpqr]posgeom().
+ o New link functions: nbcanlink().
+ o Modified VGAM family functions:
+ posnegbinomial(), zanegbinomial() and zinegbinomial()
+ use the nsimEIM argument;
+ zipoisson() handles a matrix response;
+ all [dpqr]zi-type functions handle zero-deflation,
+ normal1() can model the variance too as the 2nd parameter.
+ o Rudimentary methods functions for lrtest() and
+ update(), based on packages lmtest and base.
+ o The VGAM family functions for genetic models have been improved
+ wrt initial values.
+ o New data sets: xs.nz.
+
+
+BUG FIXES and CHANGES
+
+ o In anticipation for R version 2.15.0, VGAM imports
+ from stats4 'coef', 'plot', 'summary', 'vcov'.
+ Calls to 'print' have been replaced by 'show'
+ since VGAM uses S4 methods.
+ Numerous NAMESPACE changes have been made.
+ No more warnings during checking and installation!
+ o Labelling in summary() of vglm() objects changed. It
+ now closely follows glm(). In particular, it has
+ changed from c("Value", "Std. Error", "t value") to
+ c("Estimate", "Std. Error", "z value").
+ Note that "z value" might change later to, e.g., "Wald".
+ o Zero-inflated and zero-altered functions have renamed and
+ reordered arguments. Ouch!
+ These include 'pstr0' for probability of a structural 0 [zero-inflated],
+ and 'pobs0' for probability of an observed 0 [zero-altered].
+ For example, argument lpstr0 replaces lphi in zipoisson().
+ The order of these arguments, including the respective
+ dpqr-type functions, may have changed too.
+ o zapoisson() now implements Fisher scoring.
+ o zipoissonff() had the wrong sign for the non-diagonal EIM element.
+ o nobs() is now defined as a generic function (needed
+ for older versions of R---versions 2-12.2 or earlier, actually).
+ o Data sets renamed:
+ uscrime and usgrain renamed to crime.us and grain.us;
+ bminz renamed to bmi.nz,
+ nzc renamed to chinese.nz,
+ nzmarital renamed to marital.nz.
+ o Improved family functions: genbetaII(), betaII(), sinmad(), dagum(),
+ lomax(), invlomax(), fisk(), invparalogistic(), paralogistic();
+ wrt fitted values (range checks in place now).
+ These functions have many argument names changed, e.g.,
+ link.a is now lshape1.a, init.a is now ishape1.a.
+ Also, some default initial values have changed from 1 to 2.
+ o Argument names changed (Ouch!):
+ q.lag.ma changed to q.ma.lag in garma().
+
+
+
+
CHANGES IN VGAM VERSION 0.8-4
NEW FEATURES
diff --git a/R/aamethods.q b/R/aamethods.q
index cf0a713..766236c 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -86,39 +86,42 @@ if (FALSE)
-if (!isGeneric("print"))
- setGeneric("print", function(x, ...) standardGeneric("print"),
- package = "VGAM")
-print.vglmff <- function(x, ...) {
- f <- x@vfamily
- if (is.null(f))
- stop("not a VGAM family function")
- nn <- x@blurb
- if (!length(nn))
- invisible(return(x))
- cat("Family: ", f[1], "\n")
- if (length(f)>1) cat("Informal classes:", paste(f, collapse = ", "), "\n")
- cat("\n")
+show.vglmff <- function(object) {
+ f <- object@vfamily
+ if (is.null(f))
+ stop("not a VGAM family function")
+
+ nn <- object@blurb
+
+ cat("Family: ", f[1], "\n")
+ if (length(f) > 1)
+ cat("Informal classes:", paste(f, collapse = ", "), "\n")
+ cat("\n")
+
+ for(ii in 1:length(nn))
+ cat(nn[ii])
+ cat("\n")
+
+
- for(ii in 1:length(nn))
- cat(nn[ii])
- cat("\n")
- invisible(return(x))
}
-setMethod("print", "vglmff",
- function(x, ...)
- invisible(print.vglmff(x, ...)))
+
+
+
setMethod("show", "vglmff",
function(object)
- print.vglmff(x=object))
+ show.vglmff(object = object))
+
+
+
@@ -340,12 +343,12 @@ if (FALSE) {
if (!isGeneric("Coef"))
-setGeneric("Coef", function(object, ...) standardGeneric("Coef"),
- package = "VGAM")
+ setGeneric("Coef", function(object, ...) standardGeneric("Coef"),
+ package = "VGAM")
if (!isGeneric("Coefficients"))
-setGeneric("Coefficients", function(object, ...)
- standardGeneric("Coefficients"),
- package = "VGAM")
+ setGeneric("Coefficients", function(object, ...)
+ standardGeneric("Coefficients"),
+ package = "VGAM")
@@ -362,6 +365,7 @@ if (!isGeneric("plot"))
setGeneric("plot", function(x, y, ...) standardGeneric("plot"),
package = "VGAM")
+
if (!isGeneric("vcov"))
setGeneric("vcov", function(object, ...) standardGeneric("vcov"),
package = "VGAM")
@@ -373,6 +377,8 @@ if (!isGeneric("vcov"))
+
+
setClass("uqo", representation(
"lv" = "matrix",
"extra" = "list",
@@ -390,6 +396,7 @@ if (!isGeneric("lvplot"))
setGeneric("lvplot", function(object, ...) standardGeneric("lvplot"),
package = "VGAM")
+
if (!isGeneric("ccoef"))
setGeneric("ccoef", function(object, ...) standardGeneric("ccoef"),
package = "VGAM")
@@ -398,33 +405,37 @@ if (!isGeneric("ccoef"))
-if (!isGeneric("coef"))
setGeneric("coef", function(object, ...) standardGeneric("coef"),
package = "VGAM")
-if (!isGeneric("coefficients"))
+
setGeneric("coefficients", function(object, ...)
standardGeneric("coefficients"),
package = "VGAM")
+
if (!isGeneric("df.residual"))
setGeneric("df.residual", function(object, ...)
standardGeneric("df.residual"),
package = "VGAM")
+
if (!isGeneric("fitted"))
setGeneric("fitted", function(object, ...) standardGeneric("fitted"),
package = "VGAM")
+
if (!isGeneric("fitted.values"))
setGeneric("fitted.values", function(object, ...)
standardGeneric("fitted.values"),
package = "VGAM")
+
if (!isGeneric("model.matrix"))
setGeneric("model.matrix", function(object, ...)
standardGeneric("model.matrix"))
+
if (!isGeneric("model.frame"))
setGeneric("model.frame", function(formula, ...)
standardGeneric("model.frame"))
@@ -433,55 +444,66 @@ if (!isGeneric("model.frame"))
+
+
if (!isGeneric("predict"))
- setGeneric("predict", function(object, ...) standardGeneric("predict"))
+ setGeneric("predict", function(object, ...) standardGeneric("predict"))
+
if (!isGeneric("resid"))
- setGeneric("resid", function(object, ...) standardGeneric("resid"))
+ setGeneric("resid", function(object, ...) standardGeneric("resid"))
+
if (!isGeneric("residuals"))
- setGeneric("residuals", function(object, ...) standardGeneric("residuals"),
- package = "VGAM")
+ setGeneric("residuals", function(object, ...)
+ standardGeneric("residuals"),
+ package = "VGAM")
+
if (!isGeneric("weights"))
- setGeneric("weights", function(object, ...) standardGeneric("weights"),
- package = "VGAM")
+ setGeneric("weights", function(object, ...) standardGeneric("weights"),
+ package = "VGAM")
if (!isGeneric("AIC"))
- setGeneric("AIC", function(object, ..., k=2) standardGeneric("AIC"),
- package = "VGAM")
+ setGeneric("AIC", function(object, ..., k=2) standardGeneric("AIC"),
+ package = "VGAM")
- if (!isGeneric("formula"))
- setGeneric("formula", function(x, ...) standardGeneric("formula"),
- package = "VGAM")
+if (!isGeneric("formula"))
+ setGeneric("formula", function(x, ...) standardGeneric("formula"),
+ package = "VGAM")
+
+
+if (!isGeneric("case.names"))
+ setGeneric("case.names", function(object, ...)
+ standardGeneric("case.names"),
+ package = "VGAM")
+
+
+if (!isGeneric("variable.names"))
+ setGeneric("variable.names", function(object, ...)
+ standardGeneric("variable.names"),
+ package = "VGAM")
- if (!isGeneric("case.names"))
- setGeneric("case.names", function(object, ...)
- standardGeneric("case.names"),
- package = "VGAM")
- if (!isGeneric("variable.names"))
- setGeneric("variable.names", function(object, ...)
- standardGeneric("variable.names"),
- package = "VGAM")
-setMethod("nobs", "vlm",
- function(object, ...)
- nobs.vlm(object, ...))
+if (!isGeneric("summary"))
+ setGeneric("summary", function(object, ...)
+ standardGeneric("summary"),
+ package = "VGAM")
diff --git a/R/add1.vglm.q b/R/add1.vglm.q
index c13eb59..35f4770 100644
--- a/R/add1.vglm.q
+++ b/R/add1.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/attrassign.R b/R/attrassign.R
index 842a4b0..8a58530 100644
--- a/R/attrassign.R
+++ b/R/attrassign.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/bAIC.q b/R/bAIC.q
index d9d5b60..7d91359 100644
--- a/R/bAIC.q
+++ b/R/bAIC.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,7 +12,15 @@
if (TRUE) {
-AICvlm = function(object, ..., k=2) {
+
+if (!isGeneric("AIC"))
+ setGeneric("AIC", function(object, ..., k = 2)
+ standardGeneric("AIC"),
+ package = "VGAM")
+
+
+
+AICvlm = function(object, ..., k = 2) {
estdisp = object@misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
length(object@misc$dispersion) else 0
@@ -20,16 +28,17 @@ AICvlm = function(object, ..., k=2) {
}
-AICvgam = function(object, ..., k=2) {
+AICvgam = function(object, ..., k = 2) {
estdisp = object@misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
length(object@misc$dispersion) else 0
nldf = if (is.Numeric(object@nl.df)) sum(object@nl.df) else 0
- -2 * logLik.vlm(object, ...) + k * (length(coefvlm(object)) + no.dpar + nldf)
+ -2 * logLik.vlm(object, ...) +
+ k * (length(coefvlm(object)) + no.dpar + nldf)
}
-AICrrvglm = function(object, ..., k=2) {
+AICrrvglm = function(object, ..., k = 2) {
estdisp = object@misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
length(object@misc$dispersion) else 0
@@ -42,7 +51,7 @@ AICrrvglm = function(object, ..., k=2) {
}
-AICqrrvglm = function(object, ..., k=2) {
+AICqrrvglm = function(object, ..., k = 2) {
estdisp = object@misc$estimated.dispersion
no.dpar = if (length(estdisp) && is.logical(estdisp) && estdisp)
@@ -55,11 +64,12 @@ AICqrrvglm = function(object, ..., k=2) {
EqualTolerances = object@control$EqualTolerances
ITolerances = object@control$ITolerances
if (!(length(EqualTolerances) == 1 && is.logical(EqualTolerances)))
- stop("could not determine whether the fitted object used an ",
- "equal-tolerances assumption based on argument 'EqualTolerances'")
+ stop("could not determine whether the fitted object used an ",
+ "equal-tolerances assumption based on ",
+ "argument 'EqualTolerances'")
if (!(length(ITolerances) == 1 && is.logical(ITolerances)))
- stop("could not determine whether the fitted object used an ",
- "equal-tolerances assumption based on argument 'ITolerances'")
+ stop("could not determine whether the fitted object used an ",
+ "equal-tolerances assumption based on argument 'ITolerances'")
NOS = if (length(object@y)) ncol(object@y) else MMM
MSratio = MMM / NOS # First value is g(mean) = quadratic form in l
if (round(MSratio) != MSratio) stop("'MSratio' is not an integer")
@@ -73,24 +83,24 @@ AICqrrvglm = function(object, ..., k=2) {
setMethod("AIC", "vlm",
- function(object, ..., k=2)
- AICvlm(object, ..., k=k))
+ function(object, ..., k = 2)
+ AICvlm(object, ..., k = k))
setMethod("AIC", "vglm",
- function(object, ..., k=2)
- AICvlm(object, ..., k=k))
+ function(object, ..., k = 2)
+ AICvlm(object, ..., k = k))
setMethod("AIC", "vgam",
- function(object, ..., k=2)
- AICvgam(object, ..., k=k))
+ function(object, ..., k = 2)
+ AICvgam(object, ..., k = k))
setMethod("AIC", "rrvglm",
- function(object, ..., k=2)
- AICrrvglm(object, ..., k=k))
+ function(object, ..., k = 2)
+ AICrrvglm(object, ..., k = k))
setMethod("AIC", "qrrvglm",
- function(object, ..., k=2)
- AICqrrvglm(object, ..., k=k))
+ function(object, ..., k = 2)
+ AICqrrvglm(object, ..., k = k))
}
@@ -103,7 +113,7 @@ if (FALSE) {
-AICvglm = function(object, ..., k=2) {
+AICvglm = function(object, ..., k = 2) {
crit = logLik.vlm(object, ...)
-2 * crit + k * length(coef(object))
}
@@ -112,34 +122,33 @@ AICvglm = function(object, ..., k=2) {
-AICrrvglm = function(object, ..., k=2) {
- stop("not working yet")
- crit = logLik.vlm(object)
- sign = -2
- if (!length(crit) || !is.numeric(crit)) {
- crit = deviance(object)
- sign = 1
- }
- if (!length(crit) || !is.numeric(crit))
- stop("cannot get at the deviance or loglikelihood of the object")
+AICrrvglm = function(object, ..., k = 2) {
+ stop("not working yet")
+ crit = logLik.vlm(object)
+ sign = -2
+ if (!length(crit) || !is.numeric(crit)) {
+ crit = deviance(object)
+ sign = 1
+ }
+ if (!length(crit) || !is.numeric(crit))
+ stop("cannot get at the deviance or loglikelihood of the object")
- sign * crit + 2 * (length(coef(object)) +
- object@control$rank * (object@misc$M - object@control$rank))
+ sign * crit + 2 * (length(coef(object)) +
+ object@control$rank * (object@misc$M - object@control$rank))
}
- # setGeneric("AIC", function(object, ..., k = 2) standardGeneric("AIC"))
-setMethod("AIC", signature(object="vglm"),
- function(object, ..., k=2)
- AICvglm(object, ..., k=k))
+setMethod("AIC", signature(object = "vglm"),
+ function(object, ..., k = 2)
+ AICvglm(object, ..., k = k))
-setMethod("AIC", signature(object="rrvglm"),
- function(object, ..., k=2)
- AICrrvglm(object, ..., k=k))
+setMethod("AIC", signature(object = "rrvglm"),
+ function(object, ..., k = 2)
+ AICrrvglm(object, ..., k = k))
}
diff --git a/R/build.terms.vlm.q b/R/build.terms.vlm.q
index 4678dc2..85d7665 100644
--- a/R/build.terms.vlm.q
+++ b/R/build.terms.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/calibrate.q b/R/calibrate.q
index d039fb2..dd61d5e 100644
--- a/R/calibrate.q
+++ b/R/calibrate.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -14,27 +14,32 @@ calibrate.qrrvglm.control = function(object,
Method.optim="BFGS", # passed into optim(method=Method)
gridSize = if (Rank==1) 9 else 5,
varlvI = FALSE, ...) {
+
Rank = object@control$Rank
EqualTolerances = object@control$EqualTolerances
- if (!is.Numeric(gridSize, positive=TRUE, integer=TRUE, allow=1))
- stop("bad input for 'gridSize'")
+ if (!is.Numeric(gridSize, positive = TRUE,
+ integer.valued = TRUE, allowable.length = 1))
+ stop("bad input for 'gridSize'")
if (gridSize < 2)
- stop("gridSize must be >= 2")
+ stop("'gridSize' must be >= 2")
+
list(# maxit=Maxit.optim, # Note the name change
trace=as.numeric(trace)[1],
Method.optim=Method.optim,
gridSize=gridSize,
varlvI = as.logical(varlvI)[1])
-}
+}
+
if(!isGeneric("calibrate"))
- setGeneric("calibrate", function(object, ...) standardGeneric("calibrate"))
+ setGeneric("calibrate",
+ function(object, ...) standardGeneric("calibrate"))
calibrate.qrrvglm = function(object,
- newdata=NULL,
- type=c("lv","predictors","response","vcov","all3or4"),
- initial.vals=NULL, ...) {
+ newdata = NULL,
+ type = c("lv","predictors","response","vcov","all3or4"),
+ initial.vals = NULL, ...) {
Quadratic = if (is.logical(object@control$Quadratic))
object@control$Quadratic else FALSE # T if CQO, F if CAO
@@ -100,7 +105,7 @@ calibrate.qrrvglm = function(object,
optim(par=initial.vals[ii,],
fn=.my.calib.objfunction.qrrvglm,
method=optim.control$Method.optim, # "BFGS", or "CG" or ...
- control=c(fnscale=ifelse(minimize.obfunct,1,-1),
+ control = c(fnscale=ifelse(minimize.obfunct,1,-1),
optim.control),
y=newdata[i1,],
extra=object@extra,
@@ -112,7 +117,7 @@ calibrate.qrrvglm = function(object,
optim(par=initial.vals[ii,],
fn=.my.calib.objfunction.cao,
method=optim.control$Method.optim, # "BFGS", or "CG" or ...
- control=c(fnscale=ifelse(minimize.obfunct,1,-1),
+ control = c(fnscale=ifelse(minimize.obfunct,1,-1),
optim.control),
y=newdata[i1,],
extra=object@extra,
@@ -227,7 +232,7 @@ calibrate.qrrvglm = function(object,
}
}
-.my.calib.objfunction.qrrvglm = function(bnu, y, extra=NULL,
+.my.calib.objfunction.qrrvglm = function(bnu, y, extra = NULL,
objfun, Coefs,
misc.list,
everything=TRUE,
@@ -261,7 +266,7 @@ calibrate.qrrvglm = function(object,
-.my.calib.objfunction.cao = function(bnu, y, extra=NULL,
+.my.calib.objfunction.cao = function(bnu, y, extra = NULL,
objfun, object, Coefs,
misc.list,
everything=TRUE,
diff --git a/R/cao.R b/R/cao.R
index c1b03de..59ebb61 100644
--- a/R/cao.R
+++ b/R/cao.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -34,7 +34,7 @@ cao <- function(formula,
if (missing(data))
data <- environment(formula)
- mf <- match.call(expand=FALSE)
+ mf <- match.call(expand.dots = FALSE)
mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
mf$control <-
mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
diff --git a/R/cao.fit.q b/R/cao.fit.q
index 17681ce..de47284 100644
--- a/R/cao.fit.q
+++ b/R/cao.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,12 +9,12 @@
-cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
- etastart=NULL, mustart=NULL, coefstart=NULL,
- offset=0, family,
- control=cao.control(...), criterion="coefficients",
- qr.arg=FALSE, constraints=NULL, extra=NULL,
- Terms=Terms, function.name="cao", ...)
+cao.fit <- function(x, y, w = rep(1, length(x[, 1])),
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family,
+ control = cao.control(...), criterion = "coefficients",
+ qr.arg = FALSE, constraints = NULL, extra = NULL,
+ Terms=Terms, function.name = "cao", ...)
{
specialCM = NULL
post = list()
@@ -46,8 +46,8 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
eval(family at initialize) # Initialize mu and M (and optionally w)
n <- n.save
- modelno = switch(family at vfamily[1], "poissonff"=2,
- "binomialff"=1, "quasipoissonff"=0, "quasibinomialff"=0,
+ modelno = switch(family at vfamily[1], "poissonff" = 2,
+ "binomialff" = 1, "quasipoissonff" = 0, "quasibinomialff" = 0,
"negbinomial"=3,
"gamma2"=5, "gaussianff"=8,
0) # stop("cannot fit this model using fast algorithm")
@@ -120,8 +120,8 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
matrix(rnorm(p2*Rank, sd=rrcontrol$SD.Cinit), p2, Rank)
} else {
.Init.Poisson.QO(ymat=as.matrix(y),
- X1=x[,colx1.index,drop=FALSE],
- X2=x[,colx2.index,drop=FALSE],
+ X1 = x[,colx1.index,drop = FALSE],
+ X2 = x[,colx2.index,drop = FALSE],
Rank=rrcontrol$Rank, trace=rrcontrol$trace,
max.ncol.etamat = rrcontrol$Etamat.colmax,
Crow1positive=rrcontrol$Crow1positive,
@@ -143,7 +143,7 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
lv.mat = x[, colx2.index, drop = FALSE] %*% Cmat
- rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.CAO.")
+ rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
Nice21 = length(names.colx1.index) == 1 &&
names.colx1.index == "(Intercept)"
@@ -156,15 +156,15 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
lenbeta = pstar. * ifelse(Nice21, NOS, 1)
othint = c(Rank, control$EqualTol, pstar. ,
- dim2wz=1, inited=0, # w(,dimw) cols
- modelno, maxitl=control$maxitl, actnits=0, twice=0, p1star. ,
- p2star. , Nice21, lenbeta, controlITolerances=0, control$trace,
- p1, p2=p2, imethod=control$imethod, bchat=0)
+ dim2wz = 1, inited = 0, # w(,dimw) cols
+ modelno, maxitl=control$maxitl, actnits = 0, twice = 0, p1star. ,
+ p2star. , Nice21, lenbeta, controlITolerances = 0, control$trace,
+ p1, p2=p2, imethod=control$imethod, bchat = 0)
othdbl = c(small=control$SmallNo, fseps=control$epsilon,
.Machine$double.eps,
iKvector=rep(control$iKvector, len=NOS),
iShape=rep(control$iShape, len=NOS),
- resss=0, bfeps=control$bf.epsilon, hstep=0.1)
+ resss = 0, bfeps=control$bf.epsilon, hstep = 0.1)
for(iter in 1:optim.maxit) {
if (control$trace) {
@@ -174,17 +174,17 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
conjgrad = optim(par=c(Cmat), fn=callcaoc,
gr = if (control$GradientFunction) calldcaoc else NULL,
- method="BFGS",
- control=list(fnscale=1, trace=as.integer(control$trace),
- maxit=control$Maxit.optim, REPORT=10),
- etamat=eta, xmat=x, ymat=y, # as.matrix(y),
+ method = "BFGS",
+ control=list(fnscale = 1, trace=as.integer(control$trace),
+ maxit=control$Maxit.optim, REPORT = 10),
+ etamat=eta, xmat = x, ymat=y, # as.matrix(y),
wvec=w, modelno=modelno,
Control=control,
Nice21=Nice21,
p1star. = p1star. , p2star. = p2star. ,
n=n, M=M,
othint=othint, othdbl=othdbl,
- alldump=FALSE)
+ alldump = FALSE)
Cmat = matrix(conjgrad$par, p2, Rank) # old becoz of scale(cmatrix)
@@ -207,13 +207,13 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
flush.console()
temp9 =
callcaoc(cmatrix=Cmat,
- etamat=eta, xmat=x, ymat=y, wvec=w, modelno=modelno,
+ etamat=eta, xmat = x, ymat=y, wvec=w, modelno=modelno,
Control=control,
Nice21=Nice21,
p1star. = p1star. , p2star. = p2star. ,
n=n, M=M,
othint=othint, othdbl=othdbl,
- alldump=TRUE)
+ alldump = TRUE)
if (!is.list(extra))
extra = list()
extra$Cmat = temp9$Cmat
@@ -237,7 +237,7 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
extra$alldeviance = temp9$alldeviance
names(extra$alldeviance) = ynames
- mu = matrix(temp9$fitted, n, NOS, byrow=TRUE)
+ mu = matrix(temp9$fitted, n, NOS, byrow = TRUE)
@@ -316,19 +316,19 @@ cao.fit <- function(x, y, w=rep(1, length(x[, 1])),
-cao.control = function(Rank=1,
+cao.control = function(Rank = 1,
all.knots = FALSE,
- criterion="deviance",
- Cinit=NULL,
- Crow1positive=TRUE,
+ criterion = "deviance",
+ Cinit = NULL,
+ Crow1positive = TRUE,
epsilon = 1.0e-05,
Etamat.colmax = 10,
- GradientFunction=FALSE, # For now 24/12/04
+ GradientFunction = FALSE, # For now 24/12/04
iKvector = 0.1,
iShape = 0.1,
Norrr = ~ 1,
SmallNo = 5.0e-13,
- Use.Init.Poisson.QO=TRUE,
+ Use.Init.Poisson.QO = TRUE,
Bestof = if (length(Cinit)) 1 else 10,
maxitl = 10, # was 40 prior to 20100420
@@ -346,11 +346,12 @@ cao.control = function(Rank=1,
spar2 = 0, # 0 means df2.nl is used
...)
{
- if (!is.Numeric(iShape, posit=TRUE))
+ if (!is.Numeric(iShape, positive = TRUE))
stop("bad input for 'iShape'")
- if (!is.Numeric(iKvector, posit=TRUE))
+ if (!is.Numeric(iKvector, positive = TRUE))
stop("bad input for 'iKvector'")
- if (!is.Numeric(imethod, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(imethod, positive = TRUE, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for 'imethod'")
if (criterion != "deviance") stop("'criterion' must be 'deviance'")
if (GradientFunction)
@@ -360,24 +361,33 @@ cao.control = function(Rank=1,
if (length(Cinit) && !is.Numeric(Cinit))
stop("Bad input for 'Cinit'")
- if (!is.Numeric(Bestof, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(Bestof, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("Bad input for 'Bestof'")
- if (!is.Numeric(maxitl, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(maxitl, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("Bad input for 'maxitl'")
- if (!is.Numeric(bf.epsilon, allow=1, posit=TRUE))
+ if (!is.Numeric(bf.epsilon, allowable.length = 1,
+ positive = TRUE))
stop("Bad input for 'bf.epsilon'")
- if (!is.Numeric(bf.maxit, integ=TRUE, posit=TRUE, allow=1))
+ if (!is.Numeric(bf.maxit, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
stop("Bad input for 'bf.maxit'")
- if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) ||
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
Etamat.colmax < Rank)
stop("bad input for 'Etamat.colmax'")
- if (!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE, allow=1))
+ if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
stop("Bad input for 'Maxit.optim'")
- if (!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(optim.maxit, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("Bad input for 'optim.maxit'")
- if (!is.Numeric(SD.sitescores, allow=1, posit=TRUE))
+ if (!is.Numeric(SD.sitescores, allowable.length = 1,
+ positive = TRUE))
stop("Bad input for 'SD.sitescores'")
- if (!is.Numeric(SD.Cinit, allow=1, posit=TRUE))
+ if (!is.Numeric(SD.Cinit, allowable.length = 1,
+ positive = TRUE))
stop("Bad input for 'SD.Cinit'")
if (!is.Numeric(df1.nl) || any(df1.nl < 0))
stop("Bad input for 'df1.nl'")
@@ -395,19 +405,19 @@ cao.control = function(Rank=1,
stop("Bad input for 'spar1'")
if (!is.Numeric(spar2) || any(spar2 < 0))
stop("Bad input for 'spar2'")
- if (!is.Numeric(epsilon, posit = TRUE, allow = 1))
+ if (!is.Numeric(epsilon, positive = TRUE, allowable.length = 1))
stop("Bad input for 'epsilon'")
- if (!is.Numeric(SmallNo, posit = TRUE, allow = 1))
+ if (!is.Numeric(SmallNo, positive = TRUE, allowable.length = 1))
stop("Bad input for 'SmallNo'")
if ((SmallNo < .Machine$double.eps) ||
(SmallNo > .0001)) stop("'SmallNo' is out of range")
ans = list(
- Corner=FALSE, # A constant, not a control parameter; unneeded?
- EqualTolerances=FALSE, # A constant, not a control parameter; needed
- ITolerances=FALSE, # A constant, not a control parameter; unneeded?
- Quadratic=FALSE, # A constant, not a control parameter; unneeded?
+ Corner = FALSE, # A constant, not a control parameter; unneeded?
+ EqualTolerances = FALSE, # A constant, not a control parameter; needed
+ ITolerances = FALSE, # A constant, not a control parameter; unneeded?
+ Quadratic = FALSE, # A constant, not a control parameter; unneeded?
all.knots = as.logical(all.knots)[1],
Bestof = Bestof,
Cinit=Cinit,
@@ -447,7 +457,8 @@ cao.control = function(Rank=1,
create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
- if (!is.Numeric(p1, allow = 1, integ = TRUE, pos = TRUE))
+ if (!is.Numeric(p1, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("bad input for 'p1'")
Blist. = vector("list", p1 + Rank)
for(rr in 1:(p1+Rank))
@@ -465,7 +476,7 @@ create.cms <- function(Rank = 1, M, MSratio = 1, which, p1 = 1) {
callcaoc = function(cmatrix,
etamat, xmat, ymat, wvec, modelno,
- Control, Nice21=TRUE,
+ Control, Nice21 = TRUE,
p1star. = if (modelno %in% c(3, 5)) 2 else 1,
p2star. = Rank,
n, M,
@@ -482,28 +493,28 @@ callcaoc = function(cmatrix,
stop("the column names of 'ymat' must be given")
queue = qbig = Rank # 19/10/05; number of smooths per species
NOS = if (modelno %in% c(3, 5)) M/2 else M
- df1.nl = procVec(control$df1.nl, yn= yn , Def = control$DF1)
- spar1 = procVec(control$spar1, yn= yn , Def = control$SPAR1)
- df2.nl = procVec(control$df2.nl, yn= yn , Def = control$DF2)
- spar2 = procVec(control$spar2, yn= yn , Def = control$SPAR2)
+ df1.nl = procVec(control$df1.nl, yn = yn , Default = control$DF1)
+ spar1 = procVec(control$spar1, yn = yn , Default = control$SPAR1)
+ df2.nl = procVec(control$df2.nl, yn = yn , Default = control$DF2)
+ spar2 = procVec(control$spar2, yn = yn , Default = control$SPAR2)
if (any(c(length(spar1), length(spar2), length(df1.nl),
length(df2.nl)) != NOS))
stop("wrong length in at least one of ",
"'df1.nl', 'df2.nl', 'spar1', 'spar2'")
cmatrix = matrix(cmatrix, p2, Rank) # crow1C() needs a matrix as input
- cmatrix = crow1C(cmatrix, crow=control$Crow1positive)
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ cmatrix = crow1C(cmatrix, crow1positive =control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
evnu = eigen(var(numat))
temp7 = if (Rank > 1) evnu$vector %*% diag(evnu$value^(-0.5)) else
evnu$vector %*% evnu$value^(-0.5)
cmatrix = cmatrix %*% temp7
- cmatrix = crow1C(cmatrix, crow=control$Crow1positive)
- numat = xmat[,control$colx2.index,drop=FALSE] %*% cmatrix
+ cmatrix = crow1C(cmatrix, crow1positive =control$Crow1positive)
+ numat = xmat[,control$colx2.index,drop = FALSE] %*% cmatrix
dim(numat) = c(n, Rank)
- mynames5 = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep="")
+ mynames5 = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
nu1mat = cbind("(Intercept)" = 1, lv = numat)
dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)", mynames5))
@@ -528,7 +539,7 @@ callcaoc = function(cmatrix,
if (any(is.na(usethiseta))) {
usethiseta = t(etamat) # So that dim(usethiseta) == c(M,n)
- rmfromVGAMenv("etamat", prefix=".VGAM.CAO.")
+ rmfromVGAMenv("etamat", prefix = ".VGAM.CAO.")
}
usethisbeta = if (inited == 2)
@@ -545,17 +556,17 @@ callcaoc = function(cmatrix,
origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
which=which, p1=p1) # For 1 species only
ncolBlist. <- unlist(lapply(Blist. , ncol))
- smooth.frame = s.vam(x=nu1mat, zedd=NULL, wz=NULL, smomat=NULL,
+ smooth.frame = s.vam(x=nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
which=which,
smooth.frame=temp.smooth.frame,
bf.maxit=control$bf.maxit,
bf.epsilon=control$bf.epsilon,
- trace=FALSE, se.fit=control$se.fit,
+ trace = FALSE, se.fit=control$se.fit,
X_vlm_save=bnumat, Blist=Blist. ,
ncolBlist=ncolBlist. ,
- M= M. , qbig=NULL, Umat=NULL, # NULL ==> unneeded
- all.knots=control$all.knots, nk=NULL,
- sf.only=TRUE)
+ M= M. , qbig = NULL, Umat = NULL, # NULL ==> unneeded
+ all.knots=control$all.knots, nk = NULL,
+ sf.only = TRUE)
ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
@@ -590,18 +601,18 @@ callcaoc = function(cmatrix,
npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat), q=length(which),
se.fit=control$se.fit, 0,
- control$bf.maxit, qrank=0, M= M. , nbig=nstar, pbig=pbig,
- qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror=0, ldk=ldk,
+ control$bf.maxit, qrank = 0, M= M. , nbig=nstar, pbig=pbig,
+ qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk,
contr.sp$maxit, iinfo = 0)
if (Rank == 2) {
- smopar = (c(spar1, spar2))[interleave.VGAM(4*NOS, M=2)]
- dofvec = (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4*NOS, M=2)]
+ smopar = (c(spar1, spar2))[interleave.VGAM(4*NOS, M = 2)]
+ dofvec = (1.0 + c(df1.nl, df2.nl))[interleave.VGAM(4*NOS, M = 2)]
lamvec = 0 * dofvec
- stop("20100414; havent got Rank=2 going yet")
+ stop("20100414; havent got Rank = 2 going yet")
} else {
smopar = c(spar1, spar2)
dofvec = c(df1.nl, df2.nl) + 1.0
@@ -641,13 +652,13 @@ flush.console()
if (ans1$errcode == 0) {
- assign2VGAMenv(c("etamat", "beta"), ans1, prefix=".VGAM.CAO.")
+ assign2VGAMenv(c("etamat", "beta"), ans1, prefix = ".VGAM.CAO.")
assign(".VGAM.CAO.cmatrix", matrix(cmatrix,p2,Rank), envir=VGAM:::VGAMenv)
} else {
- cat("warning in callcaoc: error code =", ans1$errcode, "\n")
- cat("warning in callcaoc: npetc[14] =", ans1$npetc[14], "\n")
+ cat("warning in callcaoc: error code = ", ans1$errcode, "\n")
+ cat("warning in callcaoc: npetc[14] = ", ans1$npetc[14], "\n")
flush.console()
- rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.CAO.")
+ rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.CAO.")
}
returnans = if (alldump) {
@@ -663,7 +674,7 @@ flush.console()
ind7 = (smooth.frame$bindex[ii]):(smooth.frame$bindex[ii+1]-1)
ans = ans1$bcoeff[ind9+ind7]
ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
- Bspline[[ii]] = new(Class="vsmooth.spline.fit",
+ Bspline[[ii]] = new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
"xmax" = smooth.frame$xmax[ii],
"xmin" = smooth.frame$xmin[ii],
@@ -703,7 +714,7 @@ flush.console()
kindex = ans1$kindex,
lambda1 = lambda1,
lambda2 = if (Rank == 2) lambda2 else NULL,
- predictors = matrix(ans1$etamat, n, M, byrow=TRUE),
+ predictors = matrix(ans1$etamat, n, M, byrow = TRUE),
wresiduals = ans1$zedd - t(ans1$etamat), # n x M
spar1 = spar1,
spar2 = if (Rank == 2) spar2 else NULL)
@@ -733,16 +744,16 @@ calldcaoc = function(cmatrix,
Rank = control$Rank
p2 = length(control$colx2.index)
yn = dimnames(ymat)[[2]]
- if (!length( yn )) yn = paste("Y", 1:ncol(ymat), sep="")
+ if (!length( yn )) yn = paste("Y", 1:ncol(ymat), sep = "")
cmatrix = scale(cmatrix)
- xmat2 <- xmat[,control$colx2.index,drop=FALSE] #ccc
+ xmat2 <- xmat[,control$colx2.index,drop = FALSE] #ccc
numat <- xmat2 %*% matrix(cmatrix, p2, Rank)
dim(numat) <- c(nrow(xmat), Rank)
temp.smooth.frame = vector("list", 1+Rank) # Temporary makeshift frame
- mynames5 = if (Rank == 1) "lv" else paste("lv",1:Rank,sep="")
+ mynames5 = if (Rank == 1) "lv" else paste("lv",1:Rank,sep = "")
names(temp.smooth.frame) = c("(Intercept)", mynames5)
temp.smooth.frame[[1]] = rep(1, len=n)
for(uu in 1:Rank) {
@@ -784,20 +795,21 @@ calldcaoc = function(cmatrix,
origBlist = Blist. = create.cms(Rank=Rank, M=M., MSratio=MSratio,
which=which, p1 = p1) # For 1 species
ncolBlist. <- unlist(lapply(Blist. , ncol))
- nu1mat = cbind("(Intercept)"=1, lv=numat)
+ nu1mat = cbind("(Intercept)" = 1, lv=numat)
dimnames(nu1mat) = list(dimnames(xmat)[[1]], c("(Intercept)","lv"))
- smooth.frame = s.vam(x=nu1mat, zedd=NULL, wz=NULL, smomat=NULL,
- which=which,
- smooth.frame=temp.smooth.frame,
- bf.maxit=control$bf.maxit,
- bf.epsilon=control$bf.epsilon,
- trace=FALSE, se.fit=control$se.fit,
- X_vlm_save=bnumat, Blist=Blist.,
- ncolBlist=ncolBlist. ,
- M= M. , qbig=NULL, Umat=U, # NULL value ==> not needed
- all.knots=control$all.knots, nk=NULL,
- sf.only=TRUE)
+ smooth.frame = s.vam(x=nu1mat, zedd = NULL, wz = NULL, smomat = NULL,
+ which = which,
+ smooth.frame = temp.smooth.frame,
+ bf.maxit = control$bf.maxit,
+ bf.epsilon = control$bf.epsilon,
+ trace = FALSE, se.fit = control$se.fit,
+ X_vlm_save = bnumat, Blist = Blist.,
+ ncolBlist = ncolBlist. ,
+ M = M. , qbig = NULL,
+ Umat = U, # NULL value ==> not needed
+ all.knots = control$all.knots, nk = NULL,
+ sf.only = TRUE)
ldk <- 4 * max(ncolBlist.[nwhich]) # was M; # Prior to 11/7/02
ldk <- 3 * max(ncolBlist.[nwhich]) + 1 # 11/7/02
@@ -832,9 +844,9 @@ calldcaoc = function(cmatrix,
spar2 = rep(control$spar2, len=NOS) # This is used
} else {
# This is used
- df1.nl = procVec(control$df1.nl, yn= yn , Def = control$DF1)
+ df1.nl = procVec(control$df1.nl, yn = yn , Default = control$DF1)
df2.nl = df1.nl # 20100417; stopgap
- spar1 = procVec(control$spar1, yn= yn , Def = control$SPAR1)
+ spar1 = procVec(control$spar1, yn = yn , Default = control$SPAR1)
spar2 = spar1 # 20100417; stopgap
dofvec = c(df1.nl, df2.nl)
lamvec = 0 * dofvec
@@ -860,17 +872,17 @@ calldcaoc = function(cmatrix,
warning("20100405; this is old:")
- npetc = c(n=n, p=1+Rank, length(which), se.fit=control$se.fit, 0,
- maxitl=control$maxitl, qrank=0, M= M. , n.M = n* M. ,
+ npetc = c(n=n, p = 1+Rank, length(which), se.fit=control$se.fit, 0,
+ maxitl=control$maxitl, qrank = 0, M= M. , n.M = n* M. ,
pbig=sum( ncolBlist.),
- qbig=qbig, dimw= dimw. , dim1U= dim1U. , ierror=0, ldk=ldk)
+ qbig=qbig, dimw= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk)
warning("20100405; this is new:")
npetc = c(n=nrow(nu1mat), p. =ncol(nu1mat),
q=length(which),
se.fit=control$se.fit, 0,
- control$bf.maxit, qrank=0, M= M. , nbig=nstar, pbig=pbig,
- qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror=0, ldk=ldk,
+ control$bf.maxit, qrank = 0, M= M. , nbig=nstar, pbig=pbig,
+ qbig=qbig, dim2wz= dimw. , dim1U= dim1U. , ierror = 0, ldk=ldk,
contr.sp$maxit, iinfo = 0)
flush.console()
@@ -914,7 +926,7 @@ warning("20100405; this is new:")
assign(".VGAM.CAO.U", ans1$U, envir=VGAM:::VGAMenv) # U
if (ans1$errcode == 0) {
} else {
- cat("warning in calldcaoc: error code =", ans1$errcode, "\n")
+ cat("warning in calldcaoc: error code = ", ans1$errcode, "\n")
flush.console()
}
@@ -931,7 +943,7 @@ warning("20100405; this is new:")
ind9 = ind9[length(ind9)] + (bindex[ii]):(bindex[ii+1]-1)
ans = ans1$bcoeff[ind9]
ans = matrix(ans, ncol=ncolBlist[nwhich[ii]])
- Bspline[[ii]] = new(Class="vsmooth.spline.fit",
+ Bspline[[ii]] = new(Class = "vsmooth.spline.fit",
"Bcoefficients" = ans,
"xmax" = smooth.frame$xmax[ii],
"xmin" = smooth.frame$xmin[ii],
@@ -957,7 +969,7 @@ warning("20100405; this is new:")
df.residual = n*M - qrank - sum(ans1$df - 1),
fitted=ans1$fv,
kindex = ans1$kindex,
- predictors=matrix(ans1$etamat, n, M, byrow=TRUE),
+ predictors=matrix(ans1$etamat, n, M, byrow = TRUE),
wresiduals = ans1$zedd - t(ans1$etamat), # n x M
spar1 = ans1$smopar[1:NOS],
spar2 = if (Rank == 2) ans1$smopar[2*(1:NOS) - 1] else NULL)
@@ -973,7 +985,7 @@ warning("20100405; this is new:")
-setClass(Class="Coef.cao", representation(
+setClass(Class = "Coef.cao", representation(
"Bspline" = "list",
"C" = "matrix",
"Constrained" = "logical",
@@ -1000,18 +1012,21 @@ Coef.cao = function(object,
smallno = 0.05,
...) {
- if (!is.Numeric(epsOptimum, posit=TRUE, allow=1))
+ if (!is.Numeric(epsOptimum, positive = TRUE, allowable.length = 1))
stop("bad input for argument 'epsOptimum'")
- if (!is.Numeric(gridlen, posit=TRUE, integer=TRUE) || gridlen < 5)
+ if (!is.Numeric(gridlen, positive = TRUE, integer.valued = TRUE) ||
+ gridlen < 5)
stop("bad input for argument 'gridlen'")
- if (!is.Numeric(maxgriditer, posit=TRUE, allow=1, int=TRUE) ||
- maxgriditer<3)
+ if (!is.Numeric(maxgriditer, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE) ||
+ maxgriditer < 3)
stop("bad input for argument 'maxgriditer'")
if (!is.logical(ConstrainedO <- object at control$ConstrainedO))
stop("cannot determine whether the model is constrained or not")
- if (!is.Numeric(smallno, posit=TRUE, allow=1) ||
+ if (!is.Numeric(smallno, positive = TRUE, allowable.length = 1) ||
smallno > 0.5 || smallno < 0.0001)
stop("bad input for argument 'smallno'")
+
ocontrol = object at control
if ((Rank <- ocontrol$Rank) > 2) stop("'Rank' must be 1 or 2")
gridlen = rep(gridlen, length=Rank)
@@ -1030,16 +1045,16 @@ Coef.cao = function(object,
ynames = object at misc$ynames
if (!length(ynames)) ynames = object at misc$predictors.names
if (!length(ynames)) ynames = object at misc$ynames
- if (!length(ynames)) ynames = paste("Y", 1:NOS, sep="")
+ if (!length(ynames)) ynames = paste("Y", 1:NOS, sep = "")
lp.names = object at misc$predictors.names
if (!length(lp.names)) lp.names = NULL
- lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep="")
+ lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
Cmat = object at extra$Cmat # p2 x Rank (provided maxitl > 1)
if (ConstrainedO)
dimnames(Cmat) = list(names(ocontrol$colx2.index), lv.names)
lv.mat = if (ConstrainedO) {
- object at x[,ocontrol$colx2.index,drop=FALSE] %*% Cmat
+ object at x[,ocontrol$colx2.index,drop = FALSE] %*% Cmat
} else {
object at lv
}
@@ -1077,14 +1092,14 @@ Coef.cao = function(object,
((griditer <= maxgriditer) &&
((gridres1 > epsOptimum) || (gridres2 > epsOptimum)))) {
temp = predictcao(object, grid=gridd, sppno=thisSpecies,
- Rank=Rank, deriv=0, MSratio=MSratio)
+ Rank=Rank, deriv = 0, MSratio=MSratio)
yvals = temp$yvals # gridlen-vector
xvals = temp$xvals # gridlen x Rank; gridd
if (length(temp$eta2)) eta2matrix[sppno,1] = temp$eta2
nnn = length(yvals)
index = (1:nnn)[yvals == max(yvals)]
- if (length(index)!=1) warning("could not find a single maximum")
+ if (length(index) != 1) warning("could not find a single maximum")
if (Rank == 2) {
initvalue = rep(xvals[index,], length=Rank) # for optim()
# Make sure initvalue is in the interior
@@ -1116,18 +1131,18 @@ Coef.cao = function(object,
if (Rank == 2) {
# Rank = 2, so use optim(). The above was to get initial values.
- myfun = function(x, object, sppno, Rank=1, deriv=0, MSratio=1) {
+ myfun = function(x, object, sppno, Rank = 1, deriv = 0, MSratio = 1) {
# x is a 2-vector
x = matrix(x, 1, length(x))
- temp = predictcao(object, grid=x, sppno=sppno,
+ temp = predictcao(object, grid = x, sppno=sppno,
Rank=Rank, deriv=deriv, MSratio=MSratio)
temp$yval
}
- answer = optim(initvalue, myfun, gr=NULL, method="L-BFGS-B",
+ answer = optim(initvalue, myfun, gr = NULL, method = "L-BFGS-B",
lower=extents[1,], upper=extents[2,],
control=list(fnscale = -1), # maximize!
object=object, sppno=sppno, Rank=Rank,
- deriv=0, MSratio=MSratio)
+ deriv = 0, MSratio=MSratio)
# Check to see if the soln is @ boundary. If not, assign it.
for(rindex in 1:Rank)
if (abs(answer$par[rindex] - extents[1,rindex]) > smallno &&
@@ -1135,7 +1150,7 @@ Coef.cao = function(object,
optimum[rindex,sppno] = answer$par[rindex]
maximum[sppno] = answer$value
}
- } # end of Rank=2
+ } # end of Rank = 2
} # end of sppno
myetamat = rbind(maximum)
if (MSratio == 2) myetamat = kronecker(myetamat, matrix(1:0, 1, 2))
@@ -1143,7 +1158,7 @@ Coef.cao = function(object,
maximum = c(maximum) # Convert from matrix to vector
names(maximum) = ynames
- ans = new(Class="Coef.cao",
+ ans = new(Class = "Coef.cao",
Bspline = object at Bspline,
Constrained=ConstrainedO,
df1.nl = object at extra$df1.nl,
@@ -1192,7 +1207,7 @@ Coef.cao = function(object,
}
-printCoef.cao = function(object, digits = max(2, options()$digits-2), ...) {
+show.Coef.cao = function(object, digits = max(2, options()$digits-2), ...) {
Rank = object at Rank
NOS = object at NOS
M = object at M
@@ -1201,7 +1216,7 @@ printCoef.cao = function(object, digits = max(2, options()$digits-2), ...) {
cbind(Maximum=object at Maximum) else NULL
optmat = cbind(t(object at Optimum))
dimnames(optmat) = list(dimnames(optmat)[[1]],
- if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep=".")
+ if (Rank > 1) paste("Optimum", dimnames(optmat)[[2]], sep = ".")
else "Optimum")
if ( object at Constrained ) {
@@ -1226,9 +1241,11 @@ printCoef.cao = function(object, digits = max(2, options()$digits-2), ...) {
setMethod("show", "Coef.cao", function(object)
- printCoef.cao(object))
-setMethod("print", "Coef.cao", function(x, ...)
- printCoef.cao(object=x, ...))
+ show.Coef.cao(object))
+
+
+
+
setMethod("coef", "cao", function(object, ...) Coef.cao(object, ...))
setMethod("coefficients", "cao", function(object, ...)
@@ -1239,15 +1256,15 @@ setMethod("Coef", "cao", function(object, ...) Coef.cao(object, ...))
lvplot.cao = function(object,
- add= FALSE, plot.it= TRUE, rugplot = TRUE, y = FALSE,
+ add= FALSE, plot.it = TRUE, rugplot = TRUE, y = FALSE,
type=c("fitted.values", "predictors"),
- xlab=paste("Latent Variable", if (Rank == 1) "" else " 1", sep=""),
- ylab = if (Rank == 1) switch(type, predictors="Predictors",
- fitted.values="Fitted values") else "Latent Variable 2",
+ xlab=paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
+ ylab = if (Rank == 1) switch(type, predictors = "Predictors",
+ fitted.values = "Fitted values") else "Latent Variable 2",
pcex=par()$cex, pcol=par()$col, pch=par()$pch,
llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
label.arg= FALSE, adj.arg=-0.5,
- sites= FALSE, spch=NULL, scol=par()$col, scex=par()$cex,
+ sites= FALSE, spch = NULL, scol=par()$col, scex=par()$cex,
sfont=par()$font,
whichSpecies = NULL,
check.ok = TRUE, ...)
@@ -1283,13 +1300,13 @@ lvplot.cao = function(object,
if (Rank == 1) {
matplot(lvmat,
if ( y && type == "fitted.values")
- object at y[,whichSpecies,drop=FALSE] else
- r.curves[,whichSpecies,drop=FALSE],
- type="n", xlab=xlab, ylab=ylab, ...)
+ object at y[,whichSpecies,drop = FALSE] else
+ r.curves[,whichSpecies,drop = FALSE],
+ type = "n", xlab = xlab, ylab=ylab, ...)
} else { # Rank == 2
matplot(c(Coeflist at Optimum[1,whichSpecies], lvmat[,1]),
c(Coeflist at Optimum[2,whichSpecies], lvmat[,2]),
- type="n", xlab=xlab, ylab=ylab, ...)
+ type = "n", xlab = xlab, ylab=ylab, ...)
}
}
@@ -1327,7 +1344,7 @@ lvplot.cao = function(object,
if (rugplot) rug(xx)
} else {
if (sites) {
- text(lvmat[,1], lvmat[,2], adj=0.5,
+ text(lvmat[,1], lvmat[,2], adj = 0.5,
labels = if (is.null(spch)) dimnames(lvmat)[[1]] else
rep(spch, length=nrow(lvmat)), col=scol, cex=scex, font=sfont)
}
@@ -1365,7 +1382,7 @@ setMethod("lvplot", "cao",
-predict.cao <- function (object, newdata=NULL,
+predict.cao <- function (object, newdata = NULL,
type = c("link", "response", "terms"),
deriv = 0, ...) {
type <- match.arg(type, c("link", "response", "terms"))[1]
@@ -1388,7 +1405,7 @@ predict.cao <- function (object, newdata=NULL,
}
if (!length(newdata)) {
- X <- model.matrixvlm(object, type="lm", ...)
+ X <- model.matrixvlm(object, type = "lm", ...)
offset <- object at offset
tt <- terms(object)
if (!length(object at x))
@@ -1405,7 +1422,7 @@ predict.cao <- function (object, newdata=NULL,
if (nice21 && nrow(X)!=nrow(newdata)) {
as.save = attr(X, "assign")
- X = X[rep(1, nrow(newdata)),,drop=FALSE]
+ X = X[rep(1, nrow(newdata)),,drop = FALSE]
dimnames(X) = list(dimnames(newdata)[[1]], "(Intercept)")
attr(X, "assign") = as.save # Restored
}
@@ -1424,7 +1441,7 @@ predict.cao <- function (object, newdata=NULL,
cancoefs = ccoef(object)
- lvmat = X[,ocontrol$colx2.index,drop=FALSE] %*% cancoefs # n x Rank
+ lvmat = X[,ocontrol$colx2.index,drop = FALSE] %*% cancoefs # n x Rank
Rank = ocontrol$Rank
NOS = ncol(object at y)
@@ -1495,13 +1512,14 @@ setMethod("predict", "cao", function(object, ...)
predict.cao(object, ...))
-predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
- type="link") {
+predictcao <- function(object, grid, sppno, Rank = 1,
+ deriv = 0, MSratio = 1, type = "link") {
if (type != "link" && type != "terms")
stop("'link' must be \"link\" or \"terms\"")
if (ncol(grid <- as.matrix(grid)) != Rank)
stop("'grid' must have ", Rank, " columns")
- if (!is.Numeric(1+deriv, allow=1, positive=TRUE, integ=TRUE))
+ if (!is.Numeric(1 + deriv, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE))
stop("'deriv' must be a non-negative integer")
if (type == "terms" && deriv != 0)
stop("'deriv' must be 0 when type=\"terms\"")
@@ -1515,7 +1533,7 @@ predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
}
for(rindex in 1:Rank) {
temp = temp.b[[rindex]] # temp is of class "vsmooth.spline.fit"
- nlpart = predict(temp, grid[,rindex], deriv=deriv)
+ nlpart = predict(temp, grid[,rindex], deriv = deriv)
yvals = nlpart$y
if (type == "terms") {
answer[,rindex] = yvals
@@ -1559,16 +1577,16 @@ predictcao <- function(object, grid, sppno, Rank=1, deriv=0, MSratio=1,
plot.cao = function(x,
xlab = if (Rank == 1) "Latent Variable" else
paste("Latent Variable", 1:Rank),
- ylab=NULL, residuals.arg=FALSE,
+ ylab = NULL, residuals.arg = FALSE,
pcol=par()$col, pcex=par()$cex, pch=par()$pch,
lcol=par()$col, lwd=par()$lwd, lty=par()$lty,
- add=FALSE,
- main=NULL,
+ add = FALSE,
+ main = NULL,
center.cf = Rank > 1,
WhichRank = 1:Rank,
whichSpecies = NULL, # a numeric or character vector
- rugplot=TRUE, se.arg=FALSE, deriv=0,
- scale=0, ylim=NULL,
+ rugplot = TRUE, se.arg = FALSE, deriv = 0,
+ scale = 0, ylim = NULL,
overlay = FALSE, ...)
{
Rank = x at control$Rank
@@ -1606,7 +1624,7 @@ plot.cao = function(x,
match(whichSpecies[sppno], sppnames) else whichSpecies[sppno]
if (is.na(indexSpecies))
stop("mismatch found in 'whichSpecies'")
- terms.mat = predictcao(object=x, grid=lvmat, type="terms",
+ terms.mat = predictcao(object = x, grid=lvmat, type = "terms",
sppno=indexSpecies, Rank=Rank,
deriv=deriv, MSratio=MSratio)
for(rindex in WhichRank) {
@@ -1621,7 +1639,7 @@ plot.cao = function(x,
ylim.use = if (length(ylim)) ylim else
ylim.scale(range(yvals), scale)
matplot(xvals, yvals, type = "n",
- xlab=xlab[rindex],
+ xlab = xlab[rindex],
ylab = if (length(ylab)) ylab[sppno] else
ifelse(overlay, "Fitted functions", "Fitted function"),
main = if (length(main)) main[sppno] else
@@ -1652,21 +1670,21 @@ setMethod("plot", "cao",
persp.cao = function(x,
- plot.it=TRUE,
- xlim=NULL, ylim=NULL, zlim=NULL, # zlim ignored if Rank == 1
+ plot.it = TRUE,
+ xlim = NULL, ylim = NULL, zlim = NULL, # zlim ignored if Rank == 1
gridlength = if (Rank == 1) 301 else c(51,51),
whichSpecies = NULL,
xlab= if (Rank == 1) "Latent Variable" else "Latent Variable 1",
ylab= if (Rank == 1) "Expected Value" else "Latent Variable 2",
- zlab="Expected value",
+ zlab = "Expected value",
labelSpecies = FALSE, # For Rank == 1 only
stretch = 1.05, # quick and dirty, Rank == 1 only
- main="",
+ main = "",
ticktype = "detailed",
col = if (Rank == 1) par()$col else "white",
lty=par()$lty,
lwd=par()$lwd,
- rugplot=FALSE,
+ rugplot = FALSE,
...) {
object = x # don't like x as the primary argument
coefobj = Coef(object)
@@ -1684,8 +1702,8 @@ persp.cao = function(x,
ylim = if (Rank == 1) c(0, max(fvmat)*stretch) else
range(coefobj at lv[,2])
}
- xlim = rep(xlim, length=2)
- ylim = rep(ylim, length=2)
+ xlim = rep(xlim, length = 2)
+ ylim = rep(ylim, length = 2)
gridlength = rep(gridlength, length=Rank)
lv1 = seq(xlim[1], xlim[2], length=gridlength[1])
lv2 = if (Rank == 2) seq(ylim[1], ylim[2], len=gridlength[2]) else NULL
@@ -1705,7 +1723,7 @@ persp.cao = function(x,
LP = matrix(as.numeric(NA),nrow(lvmat),NOS) # For 1st eta for each spp.
for(sppno in 1:NOS) {
temp = predictcao(object=object, grid=lvmat, sppno=sppno,
- Rank=Rank, deriv=0, MSratio=MSratio)
+ Rank=Rank, deriv = 0, MSratio=MSratio)
LP[,sppno] = temp$yval
}
if (MSratio == 2) {
@@ -1721,8 +1739,8 @@ persp.cao = function(x,
col = rep(col, len=length(whichSpecies.numer))
lty = rep(lty, len=length(whichSpecies.numer))
lwd = rep(lwd, len=length(whichSpecies.numer))
- matplot(lv1, fitvals, xlab=xlab, ylab=ylab,
- type="n", main=main, xlim=xlim, ylim=ylim, ...)
+ matplot(lv1, fitvals, xlab = xlab, ylab=ylab,
+ type = "n", main=main, xlim = xlim, ylim=ylim, ...)
if (rugplot) rug(lv(object))
for(sppno in 1:length(whichSpecies.numer)) {
ptr2 = whichSpecies.numer[sppno] # points to species column
@@ -1751,7 +1769,7 @@ persp.cao = function(x,
if (plot.it)
graphics:::persp.default(lv1, lv2, maxfitted,
zlim=zlim,
- xlab=xlab, ylab=ylab, zlab=zlab,
+ xlab = xlab, ylab=ylab, zlab=zlab,
ticktype = ticktype, col = col, main=main, ...)
}
@@ -1764,7 +1782,7 @@ persp.cao = function(x,
if(!isGeneric("persp"))
setGeneric("persp", function(x, ...) standardGeneric("persp"))
-setMethod("persp", "cao", function(x, ...) persp.cao(x=x, ...))
+setMethod("persp", "cao", function(x, ...) persp.cao(x = x, ...))
@@ -1776,7 +1794,7 @@ lv.cao = function(object, ...) {
if(!isGeneric("lv"))
setGeneric("lv", function(object, ...) standardGeneric("lv"),
- package="VGAM")
+ package = "VGAM")
setMethod("lv", "cao", function(object, ...) lv.cao(object, ...))
@@ -1784,10 +1802,10 @@ if(!isGeneric("lv"))
-setClass(Class="summary.cao",
+setClass(Class = "summary.cao",
representation("misc" = "list",
"call" = "call"),
- contains="Coef.cao")
+ contains = "Coef.cao")
@@ -1810,11 +1828,11 @@ setMethod("summary", "cao", function(object, ...)
-printsummary.cao = function(x, ...) {
+show.summary.cao = function(x, ...) {
cat("\nCall:\n")
dput(x at call)
- printCoef.cao(x, ...)
+ show.Coef.cao(x, ...)
cat("\nNumber of species: ", x at NOS, "\n")
@@ -1827,24 +1845,25 @@ printsummary.cao = function(x, ...) {
invisible(x)
}
-setMethod("print", "summary.cao",
- function(x, ...)
- invisible(printsummary.cao(x, ...)))
+
+
+
setMethod("show", "summary.cao",
function(object)
- invisible(printsummary.cao(object)))
+ show.summary.cao(object))
ccoef.cao = function(object, ...) {
- Coef(object, ...)@C
+ Coef(object, ...)@C
}
ccoef.Coef.cao = function(object, ...) {
- if (length(list(...))) warning("Too late! Ignoring the extra arguments")
- object at C
+ if (length(list(...)))
+ warning("Too late! Ignoring the extra arguments")
+ object at C
}
@@ -1881,8 +1900,12 @@ setMethod("Tol", "cao", function(object, ...)
-setMethod("show", "cao", function(object) print.vgam(object))
-setMethod("print", "cao", function(x, ...) print.vgam(x, ...))
+
+
+setMethod("show", "cao", function(object) show.vgam(object))
+
+
+
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 12d03dd..439cf8d 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,7 +7,7 @@
-coefvlm <- function(object, matrix.out=FALSE, label=TRUE) {
+coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
ans <- object at coefficients
if (!label)
@@ -21,9 +21,9 @@ coefvlm <- function(object, matrix.out=FALSE, label=TRUE) {
Blist <- object at constraints
if (all(trivial.constraints(Blist) == 1)) {
- Bmat <- matrix(ans, nrow=ncolx, ncol=M, byrow=TRUE)
+ Bmat <- matrix(ans, nrow=ncolx, ncol = M, byrow = TRUE)
} else {
- Bmat <- matrix(as.numeric(NA), nrow=ncolx, ncol=M)
+ Bmat <- matrix(as.numeric(NA), nrow = ncolx, ncol = M)
if (!matrix.out)
return(ans)
diff --git a/R/cqo.R b/R/cqo.R
index c83b268..89ce1f7 100644
--- a/R/cqo.R
+++ b/R/cqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -29,7 +29,7 @@ cqo <- function(formula,
if (missing(data))
data <- environment(formula)
- mf <- match.call(expand=FALSE)
+ mf <- match.call(expand.dots = FALSE)
mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <-
mf$control <- mf$contrasts <- mf$constraints <- mf$extra <- NULL
mf$coefstart <- mf$etastart <- mf$... <- NULL
diff --git a/R/cqo.fit.q b/R/cqo.fit.q
index f131735..83e9aae 100644
--- a/R/cqo.fit.q
+++ b/R/cqo.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -584,7 +584,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
NOS = ncol(ymat)
p2 = ncol(X2)
if (NOS < 2*Rank) {
- ans=crow1C(matrix(rnorm(p2*Rank, sd=0.02), p2, Rank), Crow1positive)
+ ans=crow1C(matrix(rnorm(p2 * Rank, sd = 0.02), p2, Rank),
+ Crow1positive)
eval(sd.scale.X2.expression)
if (NOS == 1) {
eval(print.CQO.expression)
@@ -626,7 +627,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
ans2 = if (Rank > 1)
rrr.normalize(rrcontrol=temp.control, A=alt$A,
C=alt$C, x=cbind(X1, X2)) else alt
- ans = crow1C(ans2$C, rep(Crow1positive, len=effrank))
+ ans = crow1C(ans2$C, rep(Crow1positive, length.out = effrank))
Rank.save = Rank
Rank = effrank
@@ -643,7 +644,8 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
tmp = vlm.wfit(xmat=X1, zmat=etamat, Blist=NULL, U=U,
matrix.out=TRUE,
is.vlmX=FALSE, rss=TRUE, qr=FALSE, xij=xij)
- ans = crow1C(as.matrix(tmp$resid), rep(Crow1positive, len=effrank))
+ ans = crow1C(as.matrix(tmp$resid),
+ rep(Crow1positive, length.out = effrank))
if (effrank < Rank) {
ans = cbind(ans, ans.save[,-(1:effrank)]) # ans is better
}
@@ -658,7 +660,7 @@ cqo.fit <- function(x, y, w=rep(1, length(x[, 1])),
for(ii in 1:Rank)
ans[,ii] = ans[,ii] * isdlv[ii] / actualSD[ii]
}
- ans = crow1C(ans, rep(Crow1positive, len=Rank))
+ ans = crow1C(ans, rep(Crow1positive, length.out = Rank))
dimnames(ans) = list(dimnames(X1)[[1]],
if (Rank == 1) "lv" else paste("lv", 1:Rank, sep=""))
if (trace) {
@@ -818,17 +820,19 @@ cqo.end.expression = expression({
})
-crow1C = function(cmat, crow1positive=rep(TRUE, len=ncol(cmat)),
- amat=NULL) {
- if (!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
- stop("bad input in crow1C")
- for(LV in 1:ncol(cmat))
- if (( crow1positive[LV] && cmat[1,LV] < 0) ||
- (!crow1positive[LV] && cmat[1,LV] > 0)) {
- cmat[,LV] = -cmat[,LV]
- if (length(amat)) amat[,LV] = -amat[,LV]
- }
- if (length(amat)) list(cmat=cmat, amat=amat) else cmat
+crow1C = function(cmat,
+ crow1positive = rep(TRUE, length.out = ncol(cmat)),
+ amat = NULL) {
+ if (!is.logical(crow1positive) || length(crow1positive) != ncol(cmat))
+ stop("bad input in crow1C")
+
+ for(LV in 1:ncol(cmat))
+ if (( crow1positive[LV] && cmat[1,LV] < 0) ||
+ (!crow1positive[LV] && cmat[1,LV] > 0)) {
+ cmat[,LV] = -cmat[,LV]
+ if (length(amat)) amat[,LV] = -amat[,LV]
+ }
+ if (length(amat)) list(cmat=cmat, amat=amat) else cmat
}
@@ -854,7 +858,7 @@ printqrrvglm <- function(x, ...)
cat("\n")
if (length(deviance(x)))
- cat("Residual Deviance:", format(deviance(x)), "\n")
+ cat("Residual deviance:", format(deviance(x)), "\n")
if (FALSE && length(x at criterion)) {
ncrit <- names(x at criterion)
@@ -870,17 +874,20 @@ printqrrvglm <- function(x, ...)
setMethod("Coef", "qrrvglm", function(object, ...)
Coef.qrrvglm(object, ...))
+
setMethod("coef", "qrrvglm", function(object, ...)
Coef.qrrvglm(object, ...))
setMethod("coefficients", "qrrvglm", function(object, ...)
Coef.qrrvglm(object, ...))
+
if (!isGeneric("deviance"))
setGeneric("deviance", function(object, ...)
standardGeneric("deviance"))
setMethod("deviance", "qrrvglm", function(object,...)
object at criterion$deviance)
+
setMethod("fitted", "qrrvglm", function(object, ...)
fittedvlm(object))
setMethod("fitted.values", "qrrvglm", function(object, ...)
@@ -888,7 +895,11 @@ setMethod("fitted.values", "qrrvglm", function(object, ...)
-setMethod("print", "qrrvglm", function(x, ...) printqrrvglm(x, ...))
+
+
+
+
+
setMethod("show", "qrrvglm", function(object) printqrrvglm(object))
@@ -899,3 +910,4 @@ setMethod("show", "qrrvglm", function(object) printqrrvglm(object))
+
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index 938af81..6c8c194 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/effects.vglm.q b/R/effects.vglm.q
index c8e6950..9f00083 100644
--- a/R/effects.vglm.q
+++ b/R/effects.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 4fc5695..d079f5a 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -75,7 +75,7 @@ pkumar = function(q, shape1, shape2) {
if (mode(lshape2) != "character" && mode(lshape2) != "name")
lshape2 <- as.character(substitute(lshape2))
if (length(ishape1) &&
- (!is.Numeric(ishape1, allow = 1, positive = TRUE)))
+ (!is.Numeric(ishape1, allowable.length = 1, positive = TRUE)))
stop("bad input for argument 'ishape1'")
if (length(ishape2) && !is.Numeric(ishape2))
stop("bad input for argument 'ishape2'")
@@ -83,9 +83,9 @@ pkumar = function(q, shape1, shape2) {
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
- if (!is.Numeric(tol12, allow = 1, posit = TRUE))
+ if (!is.Numeric(tol12, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'tol12'")
- if (!is.Numeric(grid.shape1, allow = 2, posit = TRUE))
+ if (!is.Numeric(grid.shape1, allowable.length = 2, positive = TRUE))
stop("bad input for argument 'grid.shape1'")
new("vglmff",
@@ -236,7 +236,7 @@ drice <- function(x, vee, sigma, log = FALSE) {
rrice <- function(n, vee, sigma) {
- if (!is.Numeric(n, integ = TRUE, allow = 1))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
theta <- 1 # any number
X <- rnorm(n, mean = vee * cos(theta), sd = sigma)
@@ -266,7 +266,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
stop("bad input for argument 'isigma'")
if (!is.list(evee)) evee = list()
if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
@@ -388,7 +388,9 @@ dskellam = function(x, mu1, mu2, log = FALSE) {
stop("bad input for 'log.arg'")
L = max(length(x), length(mu1), length(mu2))
- x = rep(x, len = L); mu1 = rep(mu1, len = L); mu2 = rep(mu2, len = L);
+ x = rep(x, len = L);
+ mu1 = rep(mu1, len = L);
+ mu2 = rep(mu2, len = L);
ok2 <- is.finite(mu1) && is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
ok3 <- (mu1 == 0) & (mu2 > 0)
ok4 <- (mu1 > 0) & (mu2 == 0)
@@ -396,14 +398,14 @@ dskellam = function(x, mu1, mu2, log = FALSE) {
if (log.arg) {
ans = -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
- log(besselI(2 * sqrt(mu1*mu2), nu = x, expon = TRUE))
+ log(besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE))
ans[ok3] = dpois(x = -x[ok3], lambda = mu2[ok3], log = TRUE)
ans[ok4] = dpois(x = -x[ok4], lambda = mu1[ok4], log = TRUE)
ans[ok5] = dpois(x = x[ok5], lambda = 0.0, log = TRUE)
ans[x != round(x)] = log(0.0)
} else {
ans = (mu1/mu2)^(x/2) * exp(-mu1-mu2 + 2 * sqrt(mu1*mu2)) *
- besselI(2 * sqrt(mu1*mu2), nu = x, expon = TRUE)
+ besselI(2 * sqrt(mu1*mu2), nu = x, expon.scaled = TRUE)
ans[ok3] = dpois(x = -x[ok3], lambda = mu2[ok3])
ans[ok4] = dpois(x = -x[ok4], lambda = mu1[ok4])
ans[ok5] = dpois(x = x[ok5], lambda = 0.0)
@@ -444,7 +446,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
stop("bad input for argument 'imu2'")
if (!is.list(emu1)) emu1 = list()
if (!is.list(emu2)) emu2 = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) ||
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
@@ -457,7 +459,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
"Variance: mu1+mu2"),
constraints = eval(substitute(expression({
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- int = TRUE)
+ intercept.apply = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel=parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -582,7 +584,7 @@ dyules = function(x, rho, log = FALSE) {
ryules = function(n, rho) {
- if (!is.Numeric(n, integ = TRUE, allow = 1))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
rgeom(n, prob = exp(-rexp(n, rate=rho))) + 1
}
@@ -606,12 +608,12 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
yulesimon = function(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
{
- if (length(irho) && !is.Numeric(irho, positi = TRUE))
+ if (length(irho) && !is.Numeric(irho, positive = TRUE))
stop("argument 'irho' must be > 0")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) ||
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
@@ -722,7 +724,7 @@ pslash <- function(q, mu = 0, sigma = 1){
}
rslash <- function (n, mu = 0, sigma = 1){
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
if (any(sigma <= 0))
stop("argument 'sigma' must be positive")
@@ -745,18 +747,18 @@ slash.control <- function(save.weight = TRUE, ...)
lmu = as.character(substitute(lmu))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
- if (length(isigma) && !is.Numeric(isigma, posit = TRUE))
+ if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("'isigma' must be > 0")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
- if (!is.Numeric(iprobs, posit = TRUE) || max(iprobs) >= 1 ||
+ if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1 ||
length(iprobs)!=2)
stop("bad input for argument 'iprobs'")
- if (!is.Numeric(smallno, posit = TRUE) || smallno > 0.1)
+ if (!is.Numeric(smallno, positive = TRUE) || smallno > 0.1)
stop("bad input for argument 'smallno'")
new("vglmff",
@@ -904,12 +906,12 @@ dnefghs = function(x, tau, log = FALSE) {
nefghs <- function(link = "logit", earg = list(), itau = NULL,
imethod = 1)
{
- if (length(itau) && !is.Numeric(itau, positi = TRUE) || any(itau >= 1))
+ if (length(itau) && !is.Numeric(itau, positive = TRUE) || any(itau >= 1))
stop("argument 'itau' must be in (0,1)")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -990,10 +992,10 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
ishape1 = NULL, ishape2 = 1,
imethod = 1)
{
- if (length(ishape1) && !is.Numeric(ishape1, positi = TRUE))
+ if (length(ishape1) && !is.Numeric(ishape1, positive = TRUE))
stop("argument 'ishape1' must be positive")
if ( # length(ishape2) &&
- !is.Numeric(ishape2, positi = TRUE))
+ !is.Numeric(ishape2, positive = TRUE))
stop("argument 'ishape2' must be positive")
if (mode(lshape1) != "character" && mode(lshape1) != "name")
lshape1 = as.character(substitute(lshape1))
@@ -1001,7 +1003,8 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
lshape2 = as.character(substitute(lshape2))
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -1101,7 +1104,7 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
dbenf <- function(x, ndigits = 1, log = FALSE) {
- if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ if (!is.Numeric(ndigits, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
ndigits > 2)
stop("argument 'ndigits' must be 1 or 2")
lowerlimit <- ifelse(ndigits == 1, 1, 10)
@@ -1120,13 +1123,13 @@ dbenf <- function(x, ndigits = 1, log = FALSE) {
rbenf <- function(n, ndigits = 1) {
- if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ if (!is.Numeric(ndigits, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
ndigits > 2)
stop("argument 'ndigits' must be 1 or 2")
lowerlimit <- ifelse(ndigits == 1, 1, 10)
upperlimit <- ifelse(ndigits == 1, 9, 99)
use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
myrunif <- runif(use.n)
@@ -1141,7 +1144,7 @@ rbenf <- function(n, ndigits = 1) {
pbenf <- function(q, ndigits = 1, log.p = FALSE) {
- if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ if (!is.Numeric(ndigits, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
ndigits > 2)
stop("argument 'ndigits' must be 1 or 2")
lowerlimit <- ifelse(ndigits == 1, 1, 10)
@@ -1160,7 +1163,7 @@ pbenf <- function(q, ndigits = 1, log.p = FALSE) {
qbenf <- function(p, ndigits = 1) {
- if (!is.Numeric(ndigits, allow = 1, posit = TRUE, integ = TRUE) ||
+ if (!is.Numeric(ndigits, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
ndigits > 2)
stop("argument 'ndigits' must be 1 or 2")
lowerlimit <- ifelse(ndigits == 1, 1, 10)
diff --git a/R/family.basics.R b/R/family.basics.R
index a3ce7a4..60af74d 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -228,7 +228,7 @@ process.constraints <- function(constraints, x, M,
}
if (is.null(names(constraints)))
- names(constraints) <- rep(nasgn, length=lenconstraints)
+ names(constraints) <- rep(nasgn, length.out = lenconstraints)
temp <- if (!is.R()) list() else {
junk <- vector("list", length(nasgn))
@@ -355,25 +355,27 @@ add.constraints <- function(constraints, new.constraints,
-iam <- function(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
+iam <- function(j, k, M, hbw = M, both = FALSE, diag = TRUE)
{
+
+
jay <- j
kay <- k
if (M == 1)
- if (!diagonal) stop("cannot handle this")
+ if (!diag) stop("cannot handle this")
if (M == 1)
if (both) return(list(row.index = 1, col.index = 1)) else return(1)
- upper <- if (diagonal) M else M-1
+ upper <- if (diag) M else M-1
i2 <- as.list(upper:1)
i2 <- lapply(i2, seq)
i2 <- unlist(i2)
i1 <- matrix(1:M, M, M)
- i1 <- if (diagonal) c(i1[row(i1) >= col(i1)]) else
+ i1 <- if (diag) c(i1[row(i1) >= col(i1)]) else
c(i1[row(i1) > col(i1)])
@@ -602,7 +604,7 @@ procVec <- function(vec, yn, Default) {
default = vec
}
- answer = rep(default, len = length(yn))
+ answer = rep(default, length.out = length(yn))
names(answer) = yn
if (named) {
nvec2 = nvec[nvec != ""]
@@ -697,7 +699,7 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
dderiv = cbind(dderiv)
deta = cbind(deta)
}
- Bs = mux22(t(wzold), deta, M=M, upper = FALSE, as.mat = TRUE) # n x M
+ Bs = mux22(t(wzold), deta, M=M, upper = FALSE, as.matrix = TRUE) # n x M
sBs = c( (deta * Bs) %*% rep(1, M) ) # should have positive values
sy = c( (dderiv * deta) %*% rep(1, M) )
wznew = wzold
@@ -729,7 +731,7 @@ qnupdate <- function(w, wzold, dderiv, deta, M, keeppd = TRUE,
mbesselI0 <- function(x, deriv.arg = 0) {
- if (!is.Numeric(deriv.arg, allow = 1, integer = TRUE, posit = TRUE) &&
+ if (!is.Numeric(deriv.arg, allowable.length = 1, integer.valued = TRUE, positive = TRUE) &&
deriv.arg != 0)
stop("argument 'deriv.arg' must be a single non-negative integer")
if (!(deriv.arg == 0 || deriv.arg == 1 || deriv.arg == 2))
@@ -833,20 +835,25 @@ getfromVGAMenv <- function(varname, prefix="") {
}
-lerch <- function(x, s, v, tolerance=1.0e-10, iter=100) {
+lerch <- function(x, s, v, tolerance = 1.0e-10, iter = 100) {
if (!is.Numeric(x) || !is.Numeric(s) || !is.Numeric(v))
- stop("bad input in x, s, and/or v")
+ stop("bad input in x, s, and/or v")
if (is.complex(c(x,s,v)))
- stop("complex arguments not allowed in x, s and v")
- if (!is.Numeric(tolerance, allow=1, posi = TRUE) || tolerance > 0.01)
- stop("bad input for argument 'tolerance'")
- if (!is.Numeric(iter, allow=1, integ = TRUE, posi = TRUE))
- stop("bad input for argument 'iter'")
+ stop("complex arguments not allowed in x, s and v")
+ if (!is.Numeric(tolerance, allowable.length = 1, positive = TRUE) ||
+ tolerance > 0.01)
+ stop("bad input for argument 'tolerance'")
+ if (!is.Numeric(iter, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'iter'")
+
L = max(length(x), length(s), length(v))
- x = rep(x, length=L); s = rep(s, length=L); v = rep(v, length=L);
+ x = rep(x, length.out = L);
+ s = rep(s, length.out = L);
+ v = rep(v, length.out = L);
xok = abs(x) < 1 & !(v <= 0 & v == round(v))
x[!xok] = 0 # Fix this later
- ans = dotC(name="lerchphi123", err=integer(L), as.integer(L),
+ ans = dotC(name = "lerchphi123", err = integer(L), as.integer(L),
as.double(x), as.double(s), as.double(v),
acc=as.double(tolerance), result=double(L),
as.integer(iter))
@@ -870,7 +877,7 @@ negzero.expression <- expression({
bigUniqInt <- 1080
zneg_index <- if (length(negdotzero)) {
- if (!is.Numeric(-negdotzero, posit = TRUE, integ = TRUE) ||
+ if (!is.Numeric(-negdotzero, positive = TRUE, integer.valued = TRUE) ||
max(-negdotzero) > Musual)
stop("bad input for argument 'zero'")
@@ -893,5 +900,18 @@ negzero.expression <- expression({
+ is.empty.list = function(mylist) {
+ is.list(mylist) &&
+ length(unlist(mylist)) == 0
+ }
+
+
+
+
+
+
+
+
+
diff --git a/R/family.binomial.R b/R/family.binomial.R
index 0f9c90e..2ba8ca1 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -86,15 +86,18 @@ betabinomial.control <- function(save.weight = TRUE, ...)
if (!is.list(emu )) emu <- list()
if (!is.list(erho)) erho <- list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positive = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1, 2, 3 or 4")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1)
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
if (!is.null(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 10)
warning("'nsimEIM' should be an integer greater than 10, say")
@@ -163,10 +166,10 @@ betabinomial.control <- function(save.weight = TRUE, ...)
nvec = if (is.numeric(extra$orig.w))
round(w / extra$orig.w) else round(w),
mustart = mustart.use))
- init.rho <- if (is.Numeric( .irho )) rep( .irho, length=n) else
- rep(try.this, len = n)
- etastart <- cbind(theta2eta(mustart.use, .lmu, earg = .emu),
- theta2eta(init.rho, .lrho, earg = .erho))
+ init.rho <- if (is.Numeric( .irho )) rep( .irho , length = n) else
+ rep(try.this, length = n)
+ etastart <- cbind(theta2eta(mustart.use, .lmu , earg = .emu),
+ theta2eta(init.rho, .lrho , earg = .erho))
mustart <- NULL # Since etastart has been computed.
}
}), list( .lmu = lmu, .lrho = lrho,
@@ -333,7 +336,8 @@ dbinom2.or = function(mu1,
stop("bad input for argument 'mu2'")
if (!is.Numeric(oratio, positive = TRUE))
stop("bad input for argument 'oratio'")
- if (!is.Numeric(tol, positive = TRUE, allow = 1) || tol > 0.1)
+ if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+ tol > 0.1)
stop("bad input for argument 'tol'")
if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
@@ -368,7 +372,8 @@ rbinom2.or = function(n, mu1,
ErrorCheck = TRUE)
{
if (ErrorCheck) {
- if (!is.Numeric(n, integer = TRUE, posit = TRUE, allow = 1))
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE,
+ allowable.length = 1))
stop("bad input for argument 'n'")
if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
stop("bad input for argument 'mu1'")
@@ -376,31 +381,34 @@ rbinom2.or = function(n, mu1,
stop("bad input for argument 'mu2'")
if (!is.Numeric(oratio, positive = TRUE))
stop("bad input for argument 'oratio'")
- if (!is.Numeric(tol, positive = TRUE, allow = 1) || tol > 0.1)
+ if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+ tol > 0.1)
stop("bad input for argument 'tol'")
if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- dmat = dbinom2.or(mu1=mu1, mu2=mu2, oratio=oratio, exchang=exchangeable,
- tol=tol, ErrorCheck=ErrorCheck)
-
- answer = matrix(0, n, 2,
- dimnames = list(NULL, if (twoCols) colnames else NULL))
- yy = runif(n)
- cs1 = dmat[,"00"] + dmat[,"01"]
- cs2 = cs1 + dmat[,"10"]
- index = (dmat[,"00"] < yy) & (yy <= cs1)
- answer[index,2] = 1
- index = (cs1 < yy) & (yy <= cs2)
- answer[index,1] = 1
- index = (yy > cs2)
- answer[index,] = 1
- if (twoCols) answer else {
- answer4 = matrix(0, n, 4, dimnames = list(NULL, colnames))
- answer4[cbind(1:n, 1 + 2*answer[,1] + answer[,2])] = 1
- answer4
- }
+ dmat = dbinom2.or(mu1 = mu1, mu2 = mu2, oratio = oratio,
+ exchangeable = exchangeable,
+ tol = tol, ErrorCheck = ErrorCheck)
+
+ answer = matrix(0, n, 2,
+ dimnames = list(NULL,
+ if (twoCols) colnames else NULL))
+ yy = runif(n)
+ cs1 = dmat[,"00"] + dmat[,"01"]
+ cs2 = cs1 + dmat[,"10"]
+ index = (dmat[,"00"] < yy) & (yy <= cs1)
+ answer[index,2] = 1
+ index = (cs1 < yy) & (yy <= cs2)
+ answer[index,1] = 1
+ index = (yy > cs2)
+ answer[index,] = 1
+ if (twoCols) answer else {
+ answer4 = matrix(0, n, 4, dimnames = list(NULL, colnames))
+ answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] = 1
+ answer4
+ }
}
@@ -421,11 +429,13 @@ rbinom2.or = function(n, mu1,
lmu2 = as.character(substitute(lmu2))
if (mode(loratio) != "character" && mode(loratio) != "name")
loratio = as.character(substitute(loratio))
+
if (is.logical(exchangeable) && exchangeable && ((lmu1 != lmu2) ||
!all.equal(emu1, emu2)))
stop("exchangeable = TRUE but marginal links are not equal")
- if (!is.Numeric(tol, positive = TRUE, allow = 1) || tol > 0.1)
+ if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) || tol > 0.1)
stop("bad input for argument 'tol'")
+
if (!is.list(emu1)) emu1 = list()
if (!is.list(emu2)) emu2 = list()
if (!is.list(eoratio)) eoratio = list()
@@ -524,7 +534,7 @@ rbinom2.or = function(n, mu1,
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
}
}, list( .morerobust = morerobust ))),
@@ -593,7 +603,7 @@ dbinom2.rho = function(mu1,
mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
rho=0,
exchangeable = FALSE,
- colnames=c("00", "01", "10", "11"),
+ colnames = c("00", "01", "10", "11"),
ErrorCheck = TRUE)
{
if (ErrorCheck) {
@@ -631,7 +641,7 @@ rbinom2.rho = function(n, mu1,
ErrorCheck = TRUE)
{
if (ErrorCheck) {
- if (!is.Numeric(n, integer = TRUE, positive = TRUE, allow = 1))
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
stop("bad input for argument 'mu1'")
@@ -643,8 +653,9 @@ rbinom2.rho = function(n, mu1,
stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
- dmat = dbinom2.rho(mu1=mu1, mu2=mu2, rho=rho, exchang=exchangeable,
- ErrorCheck=ErrorCheck)
+ dmat = dbinom2.rho(mu1 = mu1, mu2 = mu2, rho = rho,
+ exchangeable = exchangeable,
+ ErrorCheck = ErrorCheck)
answer = matrix(0, n, 2,
dimnames = list(NULL, if (twoCols) colnames else NULL))
@@ -688,14 +699,18 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
if (!is.list(erho)) erho = list()
lmu12 = "probit"
emu12 = list()
+
if (is.Numeric(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 100)
warning("'nsimEIM' should be an integer greater than 100")
}
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
new("vglmff",
blurb = c("Bivariate probit model\n",
@@ -749,9 +764,11 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
mu1.init = fitted(glm1.fit)
mu2.init = fitted(glm2.fit)
} else if ( .imethod == 2) {
- mu1.init = if (is.Numeric( .imu1 )) rep( .imu1, len = n) else
+ mu1.init = if (is.Numeric( .imu1 ))
+ rep( .imu1, length = n) else
mu[,3] + mu[,4]
- mu2.init = if (is.Numeric( .imu2 )) rep( .imu2, len = n) else
+ mu2.init = if (is.Numeric( .imu2 ))
+ rep( .imu2, length = n) else
mu[,2] + mu[,4]
} else {
stop("bad value for argument 'imethod'")
@@ -776,7 +793,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
sum((if (is.numeric(extraargs$orig.w)) extraargs$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mumat,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
}
rho.grid = seq(-0.95, 0.95, len=31)
try.this = getMaxMin(rho.grid, objfun=binom2.rho.Loglikfun,
@@ -840,7 +857,7 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
}
}, list( .erho = erho ))),
vfamily = c("binom2.rho", "binom2"),
@@ -954,9 +971,10 @@ pnorm2 <- function(ah, ak, r) {
ans <- ah
size <- length(ah)
singler <- ifelse(length(r) == 1, 1, 0)
- dotC(name = "pnorm2", ah=as.double(-ah), ak=as.double(-ak), r=as.double(r),
- size=as.integer(size), singler=as.integer(singler),
- ans=as.double(ans))$ans
+ dotC(name = "pnorm2", ah=as.double(-ah), ak=as.double(-ak),
+ r=as.double(r),
+ size=as.integer(size), singler=as.integer(singler),
+ ans=as.double(ans))$ans
}
@@ -968,8 +986,8 @@ my.dbinom <- function(x,
prob = stop("no 'prob' argument"))
{
- exp( lgamma(size+1) - lgamma(size-x+1) - lgamma(x+1) +
- x * log(prob/(1-prob)) + size * log1p(-prob) )
+ exp( lgamma(size + 1) - lgamma(size - x +1) - lgamma(x + 1) +
+ x * log(prob / (1 - prob)) + size * log1p(-prob) )
}
@@ -978,8 +996,10 @@ my.dbinom <- function(x,
{
if (any(prob <= 0 || prob >= 1))
stop("some values of prob out of range")
- if (!missing(link)) link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ if (mode(link) != "character" && mode(link) != "name")
+ link <- as.character(substitute(link))
+ if (!is.list(earg)) earg = list()
new("vglmff",
blurb = c("Binomial with n unknown, prob known (prob = ",prob,")\n",
@@ -1046,7 +1066,7 @@ my.dbinom <- function(x,
if (!is.Numeric(x))
stop("bad input for argument 'x'")
- if (!is.Numeric(size, integer = TRUE))
+ if (!is.Numeric(size, integer.valued = TRUE))
stop("bad input for argument 'size'")
if (any(shape1 < 0, na.rm = TRUE))
stop("negative values for argument 'shape1' not allowed")
@@ -1105,7 +1125,7 @@ my.dbinom <- function(x,
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(size, integer = TRUE))
+ if (!is.Numeric(size, integer.valued = TRUE))
stop("bad input for argument 'size'")
if (!is.Numeric(shape1, positive = TRUE))
stop("bad input for argument 'shape1'")
@@ -1151,7 +1171,7 @@ my.dbinom <- function(x,
rbetabinom.ab = function(n, size, shape1, shape2, .dontuse.prob = NULL) {
- if (!is.Numeric(size, integer = TRUE))
+ if (!is.Numeric(size, integer.valued = TRUE))
stop("bad input for argument 'size'")
if (any(shape1 < 0, na.rm = TRUE))
stop("negative values for argument 'shape1' not allowed")
@@ -1159,7 +1179,8 @@ my.dbinom <- function(x,
stop("negative values for argument 'shape2' not allowed")
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, positive = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (length(size) != use.n) size = rep(size, len = use.n)
if (length(shape1) != use.n) shape1 = rep(shape1, len = use.n)
@@ -1221,6 +1242,8 @@ my.dbinom <- function(x,
expected.betabin.ab = function(nvec, shape1, shape2, first) {
+
+
NN = length(nvec)
ans = rep(0.0, len = NN)
if (first) {
@@ -1256,184 +1279,189 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
betabinomial.ab = function(lshape12 = "loge", earg = list(),
i1 = 1, i2 = NULL, imethod = 1,
- shrinkage.init = 0.95, nsimEIM = NULL, zero = NULL) {
- if (mode(lshape12) != "character" && mode(lshape12) != "name")
- lshape12 = as.character(substitute(lshape12))
- if (!is.Numeric(i1, positive = TRUE))
- stop("bad input for argument 'i1'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1, 2 or 3")
-
- if (length(i2) && !is.Numeric(i2, positive = TRUE))
- stop("bad input for argument 'i2'")
- if (!is.list(earg)) earg = list()
-
- if (!is.null(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
- stop("bad input for argument 'nsimEIM'")
- if (nsimEIM <= 10)
- warning("'nsimEIM' should be an integer greater than 10, say")
- }
+ shrinkage.init = 0.95, nsimEIM = NULL,
+ zero = NULL) {
+ if (mode(lshape12) != "character" && mode(lshape12) != "name")
+ lshape12 = as.character(substitute(lshape12))
+ if (!is.Numeric(i1, positive = TRUE))
+ stop("bad input for argument 'i1'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1, 2 or 3")
+
+ if (length(i2) && !is.Numeric(i2, positive = TRUE))
+ stop("bad input for argument 'i2'")
+ if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Beta-binomial model\n",
- "Links: ",
- namesof("shape1", lshape12, earg = earg), ", ",
- namesof("shape2", lshape12, earg = earg), "\n",
- "Mean: mu = shape1 / (shape1+shape2)", "\n",
- "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
- "where rho = 1 / (shape1+shape2+1)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w = w
-
- if (is.null( .nsimEIM)) {
- save.weight <- control$save.weight <- FALSE
- }
+ if (!is.null(nsimEIM)) {
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 10)
+ warning("'nsimEIM' should be an integer greater than 10, say")
+ }
- # Compute initial values for mustart -------
- mustart.orig = mustart
- eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
- if (length(mustart.orig))
- mustart = mustart.orig # Retain it if inputted
- predictors.names =
- c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
- namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
+ new("vglmff",
+ blurb = c("Beta-binomial model\n",
+ "Links: ",
+ namesof("shape1", lshape12, earg = earg), ", ",
+ namesof("shape2", lshape12, earg = earg), "\n",
+ "Mean: mu = shape1 / (shape1+shape2)", "\n",
+ "Variance: mu * (1-mu) * (1+(w-1)*rho) / w, ",
+ "where rho = 1 / (shape1+shape2+1)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w = w
- if (!length(etastart)) {
+ if (is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
- mustart.use = if (length(mustart.orig)) mustart.orig else
- mustart
-
- shape1 = rep( .i1, len = n)
- shape2 = if (length( .i2 )) {
- rep( .i2, len = n)
- } else if (length(mustart.orig)) {
- shape1 * (1 / mustart.use - 1)
- } else if ( .imethod == 1) {
- shape1 * (1 / weighted.mean(y, w) - 1)
- } else if ( .imethod == 2) {
- temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
- shape1 * (1 / temp777 - 1)
- } else {
- shape1 * (1 / weighted.mean(mustart.use, w) - 1)
- }
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
- warning("the response (as counts) does not appear to ",
- "be integer-valued. Am rounding to integer values.")
- ycounts = round(ycounts) # Make sure it is an integer
- etastart = cbind(theta2eta(shape1, .lshape12, earg = .earg),
- theta2eta(shape2, .lshape12, earg = .earg))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
- .nsimEIM = nsimEIM,
- .imethod = imethod, .sinit = shrinkage.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
- shape1 / (shape1 + shape2)
- }, list( .lshape12 = lshape12, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c("shape1" = .lshape12, "shape2" = .lshape12)
- misc$earg <- list(shape1 = .earg, shape2 = .earg)
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
- misc$rho = 1 / (shape1 + shape2 + 1)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$zero <- .zero
- }), list( .lshape12 = lshape12, .earg = earg, .nsimEIM = nsimEIM,
- .zero = zero ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ mustart.orig = mustart
+ eval(binomialff()@initialize) # Note: n,w,y,mustart is changed
+ if (length(mustart.orig))
+ mustart = mustart.orig # Retain it if inputted
+ predictors.names =
+ c(namesof("shape1", .lshape12, earg = .earg, tag = FALSE),
+ namesof("shape2", .lshape12, earg = .earg, tag = FALSE))
+
+ if (!length(etastart)) {
+
+ mustart.use = if (length(mustart.orig)) mustart.orig else
+ mustart
+
+ shape1 = rep( .i1, len = n)
+ shape2 = if (length( .i2 )) {
+ rep( .i2, len = n)
+ } else if (length(mustart.orig)) {
+ shape1 * (1 / mustart.use - 1)
+ } else if ( .imethod == 1) {
+ shape1 * (1 / weighted.mean(y, w) - 1)
+ } else if ( .imethod == 2) {
+ temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
+ shape1 * (1 / temp777 - 1)
+ } else {
+ shape1 * (1 / weighted.mean(mustart.use, w) - 1)
+ }
ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
+ y * w # Convert proportions to counts
+ if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
+ warning("the response (as counts) does not appear to ",
+ "be integer-valued. Am rounding to integer values.")
+ ycounts = round(ycounts) # Make sure it is an integer
+ etastart = cbind(theta2eta(shape1, .lshape12, earg = .earg),
+ theta2eta(shape2, .lshape12, earg = .earg))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lshape12 = lshape12, .earg = earg, .i1 = i1, .i2 = i2,
+ .nsimEIM = nsimEIM,
+ .imethod = imethod, .sinit = shrinkage.init ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ shape1 / (shape1 + shape2)
+ }, list( .lshape12 = lshape12, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c("shape1" = .lshape12, "shape2" = .lshape12)
+ misc$earg <- list(shape1 = .earg, shape2 = .earg)
+ shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ misc$rho = 1 / (shape1 + shape2 + 1)
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$zero <- .zero
+ }), list( .lshape12 = lshape12, .earg = earg,
+ .nsimEIM = nsimEIM, .zero = zero ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE ))
+ shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
+ shape2 = shape2, log = TRUE ))
+ }
+ }, list( .lshape12 = lshape12, .earg = earg ))),
+ vfamily = c("betabinomial.ab"),
+ deriv = eval(substitute(expression({
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ dshape1.deta = dtheta.deta(shape1, .lshape12, earg = .earg)
+ dshape2.deta = dtheta.deta(shape2, .lshape12, earg = .earg)
+ dl.dshape1 = digamma(shape1+ycounts) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1 + shape2)
+ dl.dshape2 = digamma(nvec + shape2 - ycounts) -
+ digamma(shape1 + shape2 + nvec) -
+ digamma(shape2) + digamma(shape1 + shape2)
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ cbind(dl.dshape1 * dshape1.deta,
+ dl.dshape2 * dshape2.deta)
+ }), list( .lshape12 = lshape12, .earg = earg ))),
+ weight = eval(substitute(expression({
+ if (is.null( .nsimEIM)) {
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2)) *
+ dshape1.deta^2
+ wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2)) *
+ dshape2.deta^2
+ wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
+ trigamma(shape1+shape2+nvec)) *
+ dshape1.deta * dshape2.deta
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dthetas.detas = cbind(dshape1.deta, dshape2.deta)
+
+ for (ii in 1:( .nsimEIM )) {
+ ysim = rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
+ shape2 = shape2)
+ dl.dshape1 = digamma(shape1+ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1+shape2)
+ dl.dshape2 = digamma(nvec+shape2-ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape2) + digamma(shape1+shape2)
+ rm(ysim)
+ temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
- }, list( .lshape12 = lshape12, .earg = earg ))),
- vfamily = c("betabinomial.ab"),
- deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
- dshape1.deta = dtheta.deta(shape1, .lshape12, earg = .earg)
- dshape2.deta = dtheta.deta(shape2, .lshape12, earg = .earg)
- dl.dshape1 = digamma(shape1+ycounts) - digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1+shape2)
- dl.dshape2 = digamma(nvec + shape2 - ycounts) -
- digamma(shape1 + shape2 + nvec) -
- digamma(shape2) + digamma(shape1 + shape2)
- (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- cbind(dl.dshape1 * dshape1.deta, dl.dshape2 * dshape2.deta)
- }), list( .lshape12 = lshape12, .earg = earg ))),
- weight = eval(substitute(expression({
- if (is.null( .nsimEIM)) {
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
- TRUE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape1) + trigamma(shape1+shape2)) *
- dshape1.deta^2
- wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
- FALSE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape2) + trigamma(shape1+shape2)) *
- dshape2.deta^2
- wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
- trigamma(shape1+shape2+nvec)) *
- dshape1.deta * dshape2.deta
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
- } else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dshape1.deta, dshape2.deta)
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- for (ii in 1:( .nsimEIM )) {
- ysim = rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
- shape2 = shape2)
- dl.dshape1 = digamma(shape1+ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1+shape2)
- dl.dshape2 = digamma(nvec+shape2-ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape2) + digamma(shape1+shape2)
- rm(ysim)
- temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
- }
- }), list( .lshape12 = lshape12, .earg = earg, .nsimEIM = nsimEIM ))))
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ }
+ }), list( .lshape12 = lshape12, .earg = earg,
+ .nsimEIM = nsimEIM ))))
}
@@ -1441,20 +1469,24 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
betageometric = function(lprob = "logit", lshape = "loge",
eprob = list(), eshape = list(),
iprob = NULL, ishape = 0.1,
- moreSummation=c(2,100), tolerance=1.0e-10, zero = NULL)
+ moreSummation = c(2, 100), tolerance = 1.0e-10,
+ zero = NULL)
{
if (mode(lprob) != "character" && mode(lprob) != "name")
lprob = as.character(substitute(lprob))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
+
if (!is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(moreSummation, positive = TRUE, allow=2, integ = TRUE))
+ if (!is.Numeric(moreSummation, positive = TRUE,
+ allowable.length = 2, integer.valued = TRUE))
stop("bad input for argument 'moreSummation'")
- if (!is.Numeric(tolerance, positive = TRUE, allow = 1) ||
+ if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
1.0-tolerance >= 1.0)
stop("bad input for argument 'tolerance'")
- if (!is.list(eprob)) eprob = list()
+
+ if (!is.list(eprob)) eprob = list()
if (!is.list(eshape)) eshape = list()
new("vglmff",
@@ -1588,12 +1620,14 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
lprob1 = as.character(substitute(lprob1))
if (mode(lprob2) != "character" && mode(lprob2) != "name")
lprob2 = as.character(substitute(lprob2))
+
if (length(iprob1) &&
(!is.Numeric(iprob1, positive = TRUE) || max(iprob1) >= 1))
stop("bad input for argument 'iprob1'")
if (length(iprob2) &&
(!is.Numeric(iprob2, positive = TRUE) || max(iprob2) >= 1))
stop("bad input for argument 'iprob2'")
+
if (!is.list(eprob1)) eprob1 = list()
if (!is.list(eprob2)) eprob2 = list()
@@ -1626,9 +1660,9 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
c(namesof("prob1", .lprob1,earg= .eprob1, tag = FALSE),
namesof("prob2", .lprob2,earg= .eprob2, tag = FALSE))
prob1.init = if (is.Numeric( .iprob1)) rep( .iprob1, len = n) else
- rep(weighted.mean(y[,1], w=w), len = n)
- prob2.init = if (is.Numeric( .iprob2)) rep( .iprob2, len = n) else
- rep(weighted.mean(y[,2], w=w*y[,1]), len = n)
+ rep(weighted.mean(y[,1], w = w), len = n)
+ prob2.init = if (is.Numeric( .iprob2)) rep( .iprob2, length = n) else
+ rep(weighted.mean(y[,2], w = w*y[,1]), length = n)
if (!length(etastart)) {
etastart = cbind(theta2eta(prob1.init, .lprob1, earg = .eprob1),
theta2eta(prob2.init, .lprob2, earg = .eprob2))
@@ -1709,13 +1743,18 @@ zipebcom = function(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
lphi12 = as.character(substitute(lphi12))
if (mode(loratio) != "character" && mode(loratio) != "name")
loratio = as.character(substitute(loratio))
- if (!is.Numeric(tol, positive = TRUE, allow = 1) || tol > 0.1)
+
+ if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
+ tol > 0.1)
stop("bad input for argument 'tol'")
- if (!is.Numeric(addRidge, allow = 1, positive = TRUE) || addRidge > 0.5)
- stop("bad input for argument 'addRidge'")
+ if (!is.Numeric(addRidge, allowable.length = 1, positive = TRUE) ||
+ addRidge > 0.5)
+ stop("bad input for argument 'addRidge'")
+
if (!is.list(emu12)) emu12 = list()
if (!is.list(ephi12)) ephi12 = list()
if (!is.list(eoratio)) eoratio = list()
+
if (lmu12 != "cloglog")
warning("argument 'lmu12' should be 'cloglog'")
@@ -1801,7 +1840,7 @@ zipebcom = function(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("zipebcom"),
deriv = eval(substitute(expression({
@@ -2066,7 +2105,8 @@ if (FALSE)
lmu12 = "probit"
emu12 = list()
if (is.Numeric(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 100)
warning("'nsimEIM' should be an integer greater than 100")
@@ -2092,9 +2132,9 @@ if (FALSE)
save.weight <- control$save.weight <- FALSE
}
if (is.null(etastart)) {
- mu1.init= if (is.Numeric(.imu1)) rep(.imu1, len = n) else
+ mu1.init= if (is.Numeric(.imu1)) rep(.imu1, length = n) else
mu[,3] + mu[,4]
- mu2.init= if (is.Numeric(.imu2)) rep(.imu2, len = n) else
+ mu2.init= if (is.Numeric(.imu2)) rep(.imu2, length = n) else
mu[,2] + mu[,4]
etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
theta2eta(mu2.init, .lmu12, earg = .emu12))
@@ -2136,7 +2176,7 @@ if (FALSE)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
}
}, list( .rho = rho ))),
vfamily = c("binom2.Rho", "binom2"),
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 909b972..46d10b0 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,11 +12,13 @@
+
bilogistic4.control <- function(save.weight = TRUE, ...)
{
list(save.weight=save.weight)
}
+
bilogistic4 = function(llocation = "identity",
lscale = "loge",
iloc1 = NULL, iscale1 = NULL,
@@ -26,7 +28,8 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("imethod must be 1 or 2")
new("vglmff",
@@ -62,14 +65,14 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
scale.init1=sqrt(3)*sum(w*(y[, 1]-location.init1)^2)/(sum(w)*pi)
scale.init2=sqrt(3)*sum(w*(y[, 2]-location.init2)^2)/(sum(w)*pi)
}
- loc1.init = if (length(.iloc1)) rep(.iloc1, len = n) else
- rep(location.init1, len = n)
- loc2.init = if (length(.iloc2)) rep(.iloc2, len = n) else
- rep(location.init2, len = n)
- scale1.init = if (length(.iscale1)) rep(.iscale1, len = n) else
- rep(1, len = n)
- scale2.init = if (length(.iscale2)) rep(.iscale2, len = n) else
- rep(1, len = n)
+ loc1.init = if (length(.iloc1)) rep(.iloc1, length.out = n) else
+ rep(location.init1, length.out = n)
+ loc2.init = if (length(.iloc2)) rep(.iloc2, length.out = n) else
+ rep(location.init2, length.out = n)
+ scale1.init = if (length(.iscale1)) rep(.iscale1, length.out = n) else
+ rep(1, length.out = n)
+ scale2.init = if (length(.iscale2)) rep(.iscale2, length.out = n) else
+ rep(1, length.out = n)
if (.llocation == "loge") location.init1 = abs(location.init1) + 0.001
if (.llocation == "loge") location.init2 = abs(location.init2) + 0.001
etastart = cbind(theta2eta(location.init1, .llocation),
@@ -161,9 +164,9 @@ dbilogis4 = function(x1, x2, loc1 = 0, scale1 = 1,
L = max(length(x1), length(x2), length(loc1), length(loc2),
length(scale1), length(scale2))
- x1 = rep(x1, len = L); x2 = rep(x2, len = L);
- loc1 = rep(loc1, len = L); loc2 = rep(loc2, len = L);
- scale1 = rep(scale1, len = L); scale2 = rep(scale2, len = L);
+ x1 = rep(x1, length.out = L); x2 = rep(x2, length.out = L);
+ loc1 = rep(loc1, length.out = L); loc2 = rep(loc2, length.out = L);
+ scale1 = rep(scale1, length.out = L); scale2 = rep(scale2, length.out = L);
zedd1 = (-(x1-loc1)/scale1)
zedd2 = (-(x2-loc2)/scale2)
logdensity = log(2) + log(zedd1) + log(zedd2) - log(scale1) -
@@ -174,22 +177,26 @@ dbilogis4 = function(x1, x2, loc1 = 0, scale1 = 1,
pbilogis4 = function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(scale1, posit = TRUE)) stop("bad input for 'scale1'")
- if (!is.Numeric(scale2, posit = TRUE)) stop("bad input for 'scale2'")
+ if (!is.Numeric(q1)) stop("bad input for 'q1'")
+ if (!is.Numeric(q2)) stop("bad input for 'q2'")
+ if (!is.Numeric(scale1, positive = TRUE)) stop("bad input for 'scale1'")
+ if (!is.Numeric(scale2, positive = TRUE)) stop("bad input for 'scale2'")
- 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
+ 1 / (1 + exp(-(q1-loc1)/scale1) + exp(-(q2-loc2)/scale2))
}
rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- if (!is.Numeric(n, posit = TRUE,allow = 1,integ = TRUE)) stop("bad input for 'n'")
- if (!is.Numeric(scale1, posit = TRUE)) stop("bad input for 'scale1'")
- if (!is.Numeric(scale2, posit = TRUE)) stop("bad input for 'scale2'")
- y1 = rlogis(n, loc=loc1, scale=scale1)
+ if (!is.Numeric(n, positive = TRUE,
+ allowable.length = 1,integer.valued = TRUE))
+ stop("bad input for 'n'")
+ if (!is.Numeric(scale1, positive = TRUE))
+ stop("bad input for 'scale1'")
+ if (!is.Numeric(scale2, positive = TRUE))
+ stop("bad input for 'scale2'")
+ y1 = rlogis(n, location = loc1, scale = scale1)
ezedd1 = exp(-(y1-loc1)/scale1)
y2 = loc2 - scale2 * log(1/sqrt(runif(n) / (1 + ezedd1)^2) - 1 - ezedd1)
cbind(y1, y2)
@@ -257,20 +264,20 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
sumx = sumx * 1.1; sumxp = sumxp * 1.2;
sumy = sumy * 1.2; sumyp = sumyp * 1.3;
}
- ainit = if (length(.ia)) rep(.ia, len = n) else
+ ainit = if (length(.ia)) rep(.ia, length.out = n) else
arr / (sumx + sumyp)
- apinit = if (length(.iap)) rep(.iap,len = n) else
+ apinit = if (length(.iap)) rep(.iap,length.out = n) else
(n-arr)/(sumxp-sumyp)
- binit = if (length(.ib)) rep(.ib, len = n) else
+ binit = if (length(.ib)) rep(.ib, length.out = n) else
(n-arr)/(sumx +sumyp)
- bpinit = if (length(.ib)) rep(.ibp,len = n) else
+ bpinit = if (length(.ib)) rep(.ibp,length.out = n) else
arr / (sumy - sumx)
etastart =
- cbind(theta2eta(rep(ainit, len = n), .la, earg = .ea ),
- theta2eta(rep(apinit, len = n), .lap, earg = .eap ),
- theta2eta(rep(binit, len = n), .lb, earg = .eb ),
- theta2eta(rep(bpinit, len = n), .lbp, earg = .ebp ))
+ cbind(theta2eta(rep(ainit, length.out = n), .la, earg = .ea ),
+ theta2eta(rep(apinit, length.out = n), .lap, earg = .eap ),
+ theta2eta(rep(binit, length.out = n), .lb, earg = .eb ),
+ theta2eta(rep(bpinit, length.out = n), .lbp, earg = .ebp ))
}
}), list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
@@ -379,7 +386,8 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
if (!is.null(ishape2))
if (!is.Numeric(ishape2, positive = TRUE))
stop("'ishape2' must be positive or NULL")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
@@ -425,12 +433,12 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
ainit = getMaxMin(a.grid, objfun=mcg2.loglik,
y=y, x=x, w=w, maximize = TRUE,
extraargs = extraargs)
- ainit = rep(if(is.Numeric( .iscale )) .iscale else ainit, len = n)
+ ainit = rep(if(is.Numeric( .iscale )) .iscale else ainit, length.out = n)
pinit = (1/ainit) * abs(momentsY[1]) + 0.01
qinit = (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
- pinit = rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit, len = n)
- qinit = rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit, len = n)
+ pinit = rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit, length.out = n)
+ qinit = rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit, length.out = n)
etastart = cbind(theta2eta(ainit, .lscale),
theta2eta(pinit, .lshape1),
@@ -506,9 +514,12 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
rfrank = function(n, alpha) {
- if (!is.Numeric(n, posit = TRUE, allow = 1, integ = TRUE)) stop("bad input for 'n'")
- if (!is.Numeric(alpha, posit = TRUE)) stop("bad input for 'alpha'")
- alpha = rep(alpha, len = n)
+ if (!is.Numeric(n, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(alpha, positive = TRUE))
+ stop("bad input for argument 'alpha'")
+ alpha = rep(alpha, length.out = n)
U = runif(n)
V = runif(n)
T = alpha^U + (alpha - alpha^U) * V
@@ -529,12 +540,12 @@ rfrank = function(n, alpha) {
pfrank = function(q1, q2, alpha) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(alpha, posit = TRUE)) stop("bad input for 'alpha'")
+ if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
L = max(length(q1), length(q2), length(alpha))
- alpha = rep(alpha, len = L)
- q1 = rep(q1, len = L)
- q2 = rep(q2, len = L)
+ alpha = rep(alpha, length.out = L)
+ q1 = rep(q1, length.out = L)
+ q2 = rep(q2, length.out = L)
x=q1; y=q2
index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
@@ -561,12 +572,12 @@ dfrank = function(x1, x2, alpha, log = FALSE) {
if (!is.Numeric(x1)) stop("bad input for 'x1'")
if (!is.Numeric(x2)) stop("bad input for 'x2'")
- if (!is.Numeric(alpha, posit = TRUE)) stop("bad input for 'alpha'")
+ if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
L = max(length(x1), length(x2), length(alpha))
- alpha = rep(alpha, len = L)
- x1 = rep(x1, len = L)
- x2 = rep(x2, len = L)
+ alpha = rep(alpha, length.out = L)
+ x1 = rep(x1, length.out = L)
+ x2 = rep(x2, length.out = L)
if (log.arg) {
denom = alpha-1 + (alpha^x1 - 1) * (alpha^x2 - 1)
@@ -595,15 +606,18 @@ frank.control <- function(save.weight = TRUE, ...)
- frank = function(lapar = "loge", eapar=list(), iapar = 2, nsimEIM = 250) {
+ frank = function(lapar = "loge", eapar = list(), iapar = 2, nsimEIM = 250) {
if (mode(lapar) != "character" && mode(lapar) != "name")
- lapar = as.character(substitute(lapar))
+ lapar = as.character(substitute(lapar))
if (!is.Numeric(iapar, positive = TRUE))
- stop("'iapar' must be positive")
+ stop("'iapar' must be positive")
+
if (!is.list(eapar)) eapar = list()
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
blurb = c("Frank's bivariate distribution\n",
@@ -619,7 +633,7 @@ frank.control <- function(save.weight = TRUE, ...)
if (length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
if (!length(etastart)) {
- apar.init = rep(.iapar, len = n)
+ apar.init = rep(.iapar, length.out = n)
etastart = cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
}
}), list( .lapar = lapar, .eapar=eapar, .iapar=iapar))),
@@ -726,7 +740,7 @@ frank.control <- function(save.weight = TRUE, ...)
stop("the response has values that are out of range")
predictors.names = c(namesof("theta", .ltheta, short = TRUE))
if (!length(etastart)) {
- theta.init = if (length( .itheta)) rep(.itheta, len = n) else {
+ theta.init = if (length( .itheta)) rep(.itheta, length.out = n) else {
1 / (y[, 2] - 1 + 0.01)
}
etastart = cbind(theta2eta(theta.init, .ltheta))
@@ -771,18 +785,23 @@ frank.control <- function(save.weight = TRUE, ...)
- morgenstern = function(lapar = "rhobit", earg =list(), iapar = NULL, tola0 = 0.01,
+ morgenstern = function(lapar = "rhobit", earg = list(), iapar = NULL, tola0 = 0.01,
imethod = 1) {
if (mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if (!is.list(earg)) earg = list()
- if (length(iapar) && (!is.Numeric(iapar, allow = 1) || abs(iapar) >= 1))
- stop("'iapar' must be a single number between -1 and 1")
- if (!is.Numeric(tola0, allow = 1, posit = TRUE))
- stop("'tola0' must be a single positive number")
+
+ if (length(iapar) &&
+ (!is.Numeric(iapar, allowable.length = 1) ||
+ abs(iapar) >= 1))
+ stop("argument 'iapar' must be a single number between -1 and 1")
+
+ if (!is.Numeric(tola0, allowable.length = 1, positive = TRUE))
+ stop("argument 'tola0' must be a single positive number")
if (length(iapar) && abs(iapar) <= tola0)
- stop("'iapar' must not be between -tola0 and tola0")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+ stop("argument 'iapar' must not be between -tola0 and tola0")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
@@ -799,14 +818,14 @@ frank.control <- function(save.weight = TRUE, ...)
if (length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
if (!length(etastart)) {
- ainit = if (length(.iapar)) rep(.iapar, len = n) else {
+ ainit = if (length(.iapar)) rep(.iapar, length.out = n) else {
mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
((Finit+expm1(-mean1)+exp(-mean2)) / exp(-mean1-mean2) - 1)/(
expm1(-mean1) * expm1(-mean2))
}
- etastart = theta2eta(rep(ainit, len = n), .lapar, earg = .earg )
+ etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar=iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
@@ -867,17 +886,21 @@ frank.control <- function(save.weight = TRUE, ...)
rfgm = function(n, alpha) {
- if (!is.Numeric(n, posit = TRUE, allow = 1, integ = TRUE)) stop("bad input for 'n'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
-
- y1 = V1 = runif(n)
- V2 = runif(n)
- temp = 2*y1 - 1
- A = alpha * temp - 1
- B = sqrt(1 - 2 * alpha * temp + (alpha*temp)^2 + 4 * alpha * V2 * temp)
- y2 = 2 * V2 / (B - A)
- matrix(c(y1,y2), nrow=n, ncol = 2)
+ if (!is.Numeric(n, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(alpha))
+ stop("bad input for argument 'alpha'")
+ if (any(abs(alpha) > 1))
+ stop("argument 'alpha' has values out of range")
+
+ y1 = V1 = runif(n)
+ V2 = runif(n)
+ temp = 2*y1 - 1
+ A = alpha * temp - 1
+ B = sqrt(1 - 2 * alpha * temp + (alpha*temp)^2 + 4 * alpha * V2 * temp)
+ y2 = 2 * V2 / (B - A)
+ matrix(c(y1, y2), nrow = n, ncol = 2)
}
@@ -891,9 +914,9 @@ dfgm = function(x1, x2, alpha, log = FALSE) {
stop("bad input for argument 'log'")
L = max(length(x1), length(x2), length(alpha))
- if (length(x1) != L) x1 = rep(x1, len = L)
- if (length(x2) != L) x2 = rep(x2, len = L)
- if (length(alpha) != L) alpha = rep(alpha, len = L)
+ if (length(x1) != L) x1 = rep(x1, length.out = L)
+ if (length(x2) != L) x2 = rep(x2, length.out = L)
+ if (length(alpha) != L) alpha = rep(alpha, length.out = L)
ans = 0 * x1
xnok = (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
if ( log.arg ) {
@@ -916,9 +939,9 @@ pfgm = function(q1, q2, alpha) {
if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(q1), length(q2), length(alpha))
- if (length(q1) != L) q1 = rep(q1, len = L)
- if (length(q2) != L) q2 = rep(q2, len = L)
- if (length(alpha) != L) alpha = rep(alpha, len = L)
+ if (length(q1) != L) q1 = rep(q1, length.out = L)
+ if (length(q2) != L) q2 = rep(q2, length.out = L)
+ if (length(alpha) != L) alpha = rep(alpha, length.out = L)
x=q1; y=q2
index = (x >= 1 & y<1) | (y >= 1 & x<1) | (x <= 0 | y <= 0) | (x >= 1 & y >= 1)
@@ -943,20 +966,25 @@ fgm.control <- function(save.weight = TRUE, ...)
- fgm = function(lapar = "rhobit", earg =list(), iapar = NULL,
+ fgm = function(lapar = "rhobit", earg = list(), iapar = NULL,
imethod = 1, nsimEIM = 200) {
if (mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
if (!length(nsimEIM) ||
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
if (length(iapar) &&
(abs(iapar) >= 1))
- stop("'iapar' should be less than 1 in absolute value")
+ stop("'iapar' should be less than 1 in absolute value")
+
new("vglmff",
blurb = c("Farlie-Gumbel-Morgenstern distribution\n",
@@ -982,7 +1010,7 @@ fgm.control <- function(save.weight = TRUE, ...)
ainit = min(0.95, max(ainit, -0.95))
- etastart = theta2eta(rep(ainit, len = n), .lapar, earg = .earg )
+ etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar=iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
@@ -1045,16 +1073,19 @@ fgm.control <- function(save.weight = TRUE, ...)
- gumbelIbiv = function(lapar = "identity", earg =list(),
+ gumbelIbiv = function(lapar = "identity", earg = list(),
iapar = NULL, imethod = 1) {
if (mode(lapar) != "character" && mode(lapar) != "name")
lapar = as.character(substitute(lapar))
if (!is.list(earg)) earg = list()
- if (length(iapar) && !is.Numeric(iapar, allow = 1))
- stop("'iapar' must be a single number")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+
+ if (length(iapar) &&
+ !is.Numeric(iapar, allowable.length = 1))
+ stop("'iapar' must be a single number")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
+ stop("argument 'imethod' must be 1 or 2")
new("vglmff",
blurb = c("Gumbel's Type I bivariate distribution\n",
@@ -1067,13 +1098,13 @@ fgm.control <- function(save.weight = TRUE, ...)
stop("the response must have non-negative values only")
predictors.names = c(namesof("apar", .lapar, earg = .earg , short = TRUE))
if (!length(etastart)) {
- ainit = if (length( .iapar )) rep( .iapar, len = n) else {
+ ainit = if (length( .iapar )) rep( .iapar, length.out = n) else {
mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
(log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
}
- etastart = theta2eta(rep(ainit, len = n), .lapar, earg = .earg )
+ etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
}
}), list( .iapar=iapar, .lapar = lapar, .earg = earg,
.imethod = imethod ))),
@@ -1142,12 +1173,12 @@ fgm.control <- function(save.weight = TRUE, ...)
pplack = function(q1, q2, oratio) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(oratio, posit = TRUE)) stop("bad input for 'oratio'")
+ if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
L = max(length(q1), length(q2), length(oratio))
- if (length(q1) != L) q1 = rep(q1, len = L)
- if (length(q2) != L) q2 = rep(q2, len = L)
- if (length(oratio) != L) oratio = rep(oratio, len = L)
+ if (length(q1) != L) q1 = rep(q1, length.out = L)
+ if (length(q2) != L) q2 = rep(q2, length.out = L)
+ if (length(oratio) != L) oratio = rep(oratio, length.out = L)
x=q1; y=q2
index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
@@ -1173,9 +1204,12 @@ pplack = function(q1, q2, oratio) {
rplack = function(n, oratio) {
- if (!is.Numeric(n, posit = TRUE, allow = 1, integ = TRUE)) stop("bad input for 'n'")
- if (!is.Numeric(oratio, posit = TRUE)) stop("bad input for 'oratio'")
- if (length(oratio) != n) oratio = rep(oratio, len = n)
+ if (!is.Numeric(n, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for 'n'")
+ if (!is.Numeric(oratio, positive = TRUE))
+ stop("bad input for 'oratio'")
+ if (length(oratio) != n) oratio = rep(oratio, length.out = n)
y1 = U = runif(n)
V = runif(n)
@@ -1193,12 +1227,12 @@ dplack = function(x1, x2, oratio, log = FALSE) {
log.arg = log
rm(log)
- if (!is.Numeric(oratio, posit = TRUE))
+ if (!is.Numeric(oratio, positive = TRUE))
stop("bad input for 'oratio'")
L = max(length(x1), length(x2), length(oratio))
- if (length(x1) != L) x1 = rep(x1, len = L)
- if (length(x2) != L) x2 = rep(x2, len = L)
- if (length(oratio) != L) oratio = rep(oratio, len = L)
+ if (length(x1) != L) x1 = rep(x1, length.out = L)
+ if (length(x2) != L) x2 = rep(x2, length.out = L)
+ if (length(oratio) != L) oratio = rep(oratio, length.out = L)
if ( !is.logical( log.arg ) || length( log.arg ) != 1 )
stop("bad input for argument 'log'")
@@ -1224,14 +1258,15 @@ plackett.control <- function(save.weight = TRUE, ...)
- plackett = function(link = "loge", earg =list(),
+ plackett = function(link = "loge", earg = list(),
ioratio = NULL, imethod = 1, nsimEIM = 200) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (length(ioratio) && (!is.Numeric(ioratio, posit = TRUE)))
+ if (length(ioratio) && (!is.Numeric(ioratio, positive = TRUE)))
stop("'ioratio' must be positive")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("imethod must be 1 or 2")
new("vglmff",
@@ -1263,7 +1298,7 @@ plackett.control <- function(save.weight = TRUE, ...)
(0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] < y20)]))))
}
}
- etastart = theta2eta(rep(orinit, len = n), .link, earg = .earg)
+ etastart = theta2eta(rep(orinit, length.out = n), .link, earg = .earg)
}
}), list( .ioratio=ioratio, .link = link, .earg = earg,
.imethod = imethod ))),
@@ -1340,9 +1375,9 @@ damh = function(x1, x2, alpha, log = FALSE) {
if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(x1), length(x2), length(alpha))
- alpha = rep(alpha, len = L)
- x1 = rep(x1, len = L)
- x2 = rep(x2, len = L)
+ alpha = rep(alpha, length.out = L)
+ x1 = rep(x1, length.out = L)
+ x2 = rep(x2, length.out = L)
temp = 1-alpha*(1-x1)*(1-x2)
if (log.arg) {
ans = log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
@@ -1361,9 +1396,9 @@ pamh = function(q1, q2, alpha) {
if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
L = max(length(q1), length(q2), length(alpha))
- if (length(q1) != L) q1 = rep(q1, len = L)
- if (length(q2) != L) q2 = rep(q2, len = L)
- if (length(alpha) != L) alpha = rep(alpha, len = L)
+ if (length(q1) != L) q1 = rep(q1, length.out = L)
+ if (length(q2) != L) q2 = rep(q2, length.out = L)
+ if (length(alpha) != L) alpha = rep(alpha, length.out = L)
x=q1; y=q2
index = (x >= 1 & y < 1) | (y >= 1 & x < 1) |
@@ -1381,9 +1416,13 @@ pamh = function(q1, q2, alpha) {
}
ramh = function(n, alpha) {
- if (!is.Numeric(n, posit = TRUE, allow = 1, integ = TRUE)) stop("bad input for 'n'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
+ if (!is.Numeric(n, positive = TRUE, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("bad input for 'n'")
+ if (!is.Numeric(alpha))
+ stop("bad input for 'alpha'")
+ if (any(abs(alpha) > 1))
+ stop("'alpha' values out of range")
U1 = V1 = runif(n)
V2 = runif(n)
@@ -1394,24 +1433,32 @@ ramh = function(n, alpha) {
matrix(c(U1,U2), nrow=n, ncol = 2)
}
+
amh.control <- function(save.weight = TRUE, ...)
{
list(save.weight=save.weight)
}
- amh = function(lalpha = "rhobit", ealpha=list(), ialpha = NULL,
+
+ amh = function(lalpha = "rhobit", ealpha = list(), ialpha = NULL,
imethod = 1, nsimEIM = 250)
{
if (mode(lalpha) != "character" && mode(lalpha) != "name")
lalpha = as.character(substitute(lalpha))
if (!is.list(ealpha)) ealpha = list()
+
if (length(ialpha) && (abs(ialpha) > 1))
stop("'ialpha' should be less than or equal to 1 in absolute value")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("imethod must be 1 or 2")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("imethod must be 1 or 2")
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
+
new("vglmff",
blurb = c("Ali-Mikhail-Haq distribution\n",
@@ -1435,7 +1482,7 @@ amh.control <- function(save.weight = TRUE, ...)
(1 - (mean1 * mean2 / Finit)) / ((1-mean1) * (1-mean2))
}
ainit = min(0.95, max(ainit, -0.95))
- etastart = theta2eta(rep(ainit, len = n), .lalpha, earg = .ealpha )
+ etastart = theta2eta(rep(ainit, length.out = n), .lalpha, earg = .ealpha )
}
}), list( .lalpha = lalpha, .ealpha = ealpha, .ialpha=ialpha,
.imethod = imethod))),
@@ -1560,7 +1607,8 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
if(!trivial1 && !trivial2)
stop("only one of 'equalmean' and 'equalsd' can be assigned a value")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("argument 'imethod' must be 1 or 2")
new("vglmff",
@@ -1600,13 +1648,13 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
if (!length(etastart)) {
imean1 = rep(if (length( .imean1 )) .imean1 else
- weighted.mean(y[, 1], w = w), len = n)
+ weighted.mean(y[, 1], w = w), length.out = n)
imean2 = rep(if (length( .imean2 )) .imean2 else
- weighted.mean(y[, 2], w = w), len = n)
- isd1 = rep(if (length( .isd1 )) .isd1 else sd(y[, 1]), len = n)
- isd2 = rep(if (length( .isd2 )) .isd2 else sd(y[, 2]), len = n)
+ weighted.mean(y[, 2], w = w), length.out = n)
+ isd1 = rep(if (length( .isd1 )) .isd1 else sd(y[, 1]), length.out = n)
+ isd2 = rep(if (length( .isd2 )) .isd2 else sd(y[, 2]), length.out = n)
irho = rep(if (length( .irho )) .irho else cor(y[, 1], y[, 2]),
- len = n)
+ length.out = n)
if ( .imethod == 2) {
imean1 = abs(imean1) + 0.01
diff --git a/R/family.categorical.R b/R/family.categorical.R
index b406c24..5bbd4cd 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -40,10 +40,10 @@ process.categorical.data.vgam = expression({
if (!exists("delete.zero.colns") ||
(exists("delete.zero.colns") && delete.zero.colns)) {
sumy2 = colSums(y)
- if (any(index <- sumy2==0)) {
+ if (any(index <- sumy2 == 0)) {
y = y[,!index, drop = FALSE]
sumy2 = sumy2[!index]
- if (all(index) || ncol(y)<=1)
+ if (all(index) || ncol(y) <= 1)
stop("'y' matrix has 0 or 1 columns")
warning("Deleted ", sum(!index),
" columns of the response matrix due to zero counts")
@@ -51,7 +51,7 @@ process.categorical.data.vgam = expression({
}
- if (any(miss <- (nvec==0))) {
+ if (any(miss <- (nvec == 0))) {
smiss <- sum(miss)
warning("Deleted ", smiss,
" rows of the response matrix due to zero counts")
@@ -111,7 +111,7 @@ Deviance.categorical.data.vgam <-
if (M > 1)
return(NULL)
devi = devi %*% rep(1, ncol(devi)) # deviance = \sum_i devi[i]
- return(c(sign(y[,1] - mu[,1]) * sqrt(abs(devi) * w)))
+ return(c(sign(y[, 1] - mu[, 1]) * sqrt(abs(devi) * w)))
} else
sum(w * devi)
}
@@ -185,8 +185,8 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
eval(process.categorical.data.vgam)
M = ncol(y) - 1
mynames = if ( .reverse)
- paste("P[Y=", 2:(M+1),"|Y<=", 2:(M+1),"]", sep = "") else
- paste("P[Y=", 1:M, "|Y>=", 1:M, "]", sep = "")
+ paste("P[Y = ", 2:(M+1),"|Y< = ", 2:(M+1),"]", sep = "") else
+ paste("P[Y = ", 1:M, "|Y> = ", 1:M, "]", sep = "")
predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep = "")
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
@@ -231,7 +231,8 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
theta2eta(djr, .link, earg = .earg )
} else {
M = ncol(mu) - 1
- dj = if (M==1) mu[,1] else mu[,1:M]/(1-cbind(0,cump[,1:(M-1)]))
+ dj = if (M == 1) mu[, 1] else
+ mu[, 1:M] / (1 - cbind(0, cump[, 1:(M-1)]))
theta2eta(dj, .link, earg = .earg )
}
}, list( .earg = earg, .link = link, .reverse = reverse) )),
@@ -250,7 +251,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("sratio", "vcategorical"),
deriv = eval(substitute(expression({
@@ -274,12 +275,12 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
cump = tapplymat1(mu, "cumsum")
ddjr.deta = dtheta.deta(djr, .link, earg = .earg )
wz = c(w) * ddjr.deta^2 *
- (mu[,-1] / djr^2 + cump[,1:M] / (1-djr)^2)
+ (mu[,-1] / djr^2 + cump[, 1:M] / (1-djr)^2)
} else {
ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[,ncol(mu):1]
ddj.deta = dtheta.deta(dj, .link, earg = .earg )
wz = c(w) * ddj.deta^2 *
- (mu[,1:M] / dj^2 + ccump[,-1] / (1-dj)^2)
+ (mu[, 1:M] / dj^2 + ccump[,-1] / (1-dj)^2)
}
wz
@@ -315,8 +316,8 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
eval(process.categorical.data.vgam)
M = ncol(y) - 1
mynames = if ( .reverse )
- paste("P[Y<",2:(M+1),"|Y<=",2:(M+1),"]", sep = "") else
- paste("P[Y>",1:M,"|Y>=",1:M,"]", sep = "")
+ paste("P[Y<",2:(M+1),"|Y< = ",2:(M+1),"]", sep = "") else
+ paste("P[Y>",1:M,"|Y> = ",1:M,"]", sep = "")
predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep = "")
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
@@ -359,7 +360,8 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
theta2eta(djrs, .link, earg = .earg )
} else {
M = ncol(mu) - 1
- djs = if (M==1) 1-mu[,1] else 1-mu[,1:M]/(1-cbind(0,cump[,1:(M-1)]))
+ djs = if (M == 1) 1 - mu[, 1] else
+ 1 - mu[, 1:M] / (1 - cbind(0, cump[, 1:(M-1)]))
theta2eta(djs, .link, earg = .earg )
}
}, list( .earg = earg, .link = link, .reverse = reverse) )),
@@ -378,7 +380,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("cratio", "vcategorical"),
deriv = eval(substitute(expression({
@@ -417,7 +419,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
-vglm.multinomial.deviance.control = function(maxit=21, panic=FALSE, ...)
+vglm.multinomial.deviance.control = function(maxit=21, panic = FALSE, ...)
{
if (maxit < 1) {
warning("bad value of maxit; using 21 instead")
@@ -426,7 +428,7 @@ vglm.multinomial.deviance.control = function(maxit=21, panic=FALSE, ...)
list(maxit=maxit, panic=as.logical(panic)[1])
}
-vglm.multinomial.control = function(maxit=21, panic=FALSE,
+vglm.multinomial.control = function(maxit=21, panic = FALSE,
criterion=c("aic1", "aic2", names( .min.criterion.VGAM )), ...)
{
if (mode(criterion) != "character" && mode(criterion) != "name")
@@ -440,11 +442,11 @@ vglm.multinomial.control = function(maxit=21, panic=FALSE,
}
list(maxit=maxit, panic=as.logical(panic)[1],
criterion=criterion,
- min.criterion=c("aic1"=FALSE, "aic2" = TRUE, .min.criterion.VGAM))
+ min.criterion=c("aic1" = FALSE, "aic2" = TRUE, .min.criterion.VGAM))
}
-vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
+vglm.vcategorical.control = function(maxit=30, trace = FALSE, panic = TRUE, ...)
{
if (maxit < 1) {
warning("bad value of maxit; using 200 instead")
@@ -468,16 +470,16 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
warning("'refLevel' is from an ordered factor")
refLevel = as.character(refLevel) == levels(refLevel)
refLevel = (1:length(refLevel))[refLevel]
- if (!is.Numeric(refLevel, allow=1, integer = TRUE, posit = TRUE))
+ if (!is.Numeric(refLevel, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("could not coerce 'refLevel' into a single positive integer")
- } else if (!is.Numeric(refLevel, allow=1, integer = TRUE, posit = TRUE))
+ } else if (!is.Numeric(refLevel, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("'refLevel' must be a single positive integer")
new("vglmff",
blurb = c("Multinomial logit model\n\n",
if (refLevel < 0)
"Links: log(mu[,j]/mu[,M+1]), j=1:M,\n" else {
- if (refLevel==1)
+ if (refLevel == 1)
paste("Links: log(mu[,j]/mu[,", refLevel,
"]), j=2:(M+1),\n", sep = "") else
paste("Links: log(mu[,j]/mu[,", refLevel,
@@ -493,7 +495,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- intercept.apply=FALSE)
+ intercept.apply = FALSE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
constraints = cm.nointercept.vgam(constraints, x, .nointercept, M)
}), list( .parallel = parallel, .zero = zero, .nointercept=nointercept,
@@ -521,7 +523,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
phat = cbind(1, exp(eta))
} else {
use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
- etamat = cbind(eta[,1:( .refLevel - 1)], 0,
+ etamat = cbind(eta[, 1:( .refLevel - 1)], 0,
eta[,( .refLevel ):M])
phat = exp(etamat)
}
@@ -565,7 +567,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("multinomial", "vcategorical"),
deriv = eval(substitute(expression({
@@ -583,7 +585,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
if (M == 1) {
- wz = mu[,3-use.refLevel] * (1-mu[,3-use.refLevel])
+ wz = mu[, 3-use.refLevel] * (1-mu[, 3-use.refLevel])
} else {
index = iam(NA, NA, M, both = TRUE, diag = TRUE)
myinc = (index$row.index >= use.refLevel)
@@ -592,7 +594,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
index$col.index[myinc] = index$col.index[myinc] + 1
wz = -mu[,index$row] * mu[,index$col]
- wz[,1:M] = wz[,1:M] + mu[, -use.refLevel ]
+ wz[, 1:M] = wz[, 1:M] + mu[, -use.refLevel ]
}
atiny = (mytiny %*% rep(1, ncol(mu))) > 0 # apply(mytiny, 1, any)
@@ -647,7 +649,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
}
}), list( .parallel = parallel, .mv = mv, .intercept.apply=intercept.apply ))),
deviance=eval(substitute(
- function(mu, y, w, residuals=FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
answer =
if ( .mv ) {
@@ -698,8 +700,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
Y.names = paste("Y", iii, sep = "")
mu.names = paste("mu", iii, ".", sep = "")
mynames = c(mynames, if ( .reverse )
- paste("P[",Y.names,">=",2:Llevels,"]", sep = "") else
- paste("P[",Y.names,"<=",1:(Llevels-1),"]", sep = ""))
+ paste("P[",Y.names,"> = ",2:Llevels,"]", sep = "") else
+ paste("P[",Y.names,"< = ",1:(Llevels-1),"]", sep = ""))
y.names = c(y.names, paste(mu.names, 1:Llevels, sep = ""))
}
predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
@@ -710,8 +712,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
eval(process.categorical.data.vgam)
M = ncol(y)-1
mynames = if ( .reverse )
- paste("P[Y>=", 2:(1+M), "]", sep = "") else
- paste("P[Y<=", 1:M, "]", sep = "")
+ paste("P[Y> = ", 2:(1+M), "]", sep = "") else
+ paste("P[Y< = ", 1:M, "]", sep = "")
predictors.names =
namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep = "")
@@ -743,7 +745,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
cump = cbind(eta2theta(eta[,cindex, drop = FALSE], .link,
earg= .earg), 1)
fv.matrix[,aindex] =
- cbind(cump[,1], tapplymat1(cump, "diff"))
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
}
}
fv.matrix
@@ -754,7 +756,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
} else {
cump = cbind(eta2theta(eta, .link, earg = .earg), 1)
- cbind(cump[,1], tapplymat1(cump, "diff"))
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
}
if (length(extra$dimnamesy2))
dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
@@ -791,14 +793,14 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
aindex = (iii-1)*(Llevels) + 1:(Llevels)
cump = tapplymat1(as.matrix(mu[,aindex]), "cumsum")
eta.matrix[,cindex] =
- theta2eta(if ( .reverse) 1-cump[,1:(Llevels-1)] else
- cump[,1:(Llevels-1)], .link, earg = .earg)
+ theta2eta(if ( .reverse) 1-cump[, 1:(Llevels-1)] else
+ cump[, 1:(Llevels-1)], .link, earg = .earg)
}
eta.matrix
} else {
cump = tapplymat1(as.matrix(mu), "cumsum")
M = ncol(as.matrix(mu)) - 1
- theta2eta(if ( .reverse ) 1-cump[,1:M] else cump[,1:M], .link,
+ theta2eta(if ( .reverse ) 1-cump[, 1:M] else cump[, 1:M], .link,
earg= .earg)
}
answer
@@ -818,7 +820,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("cumulative", "vcategorical"),
deriv = eval(substitute(expression({
@@ -835,7 +837,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
dcump.deta[,cindex] = dtheta.deta(cump, .link, earg = .earg)
resmat[,cindex] =
(y[,aindex, drop = FALSE]/mu.use[,aindex, drop = FALSE] -
- y[,1+aindex, drop = FALSE]/mu.use[,1+aindex, drop = FALSE])
+ y[, 1+aindex, drop = FALSE]/mu.use[, 1+aindex, drop = FALSE])
}
(if ( .reverse) -c(w) else c(w)) * dcump.deta * resmat
} else {
@@ -872,7 +874,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
oindex = (iii-1)*(Llevels-1) + 1:(Llevels-2)
wz[,cptrwz + 1 + (1:(Llevels-2))] =
-c(w) * dcump.deta[,oindex] *
- dcump.deta[,1+oindex]
+ dcump.deta[, 1+oindex]
cptrwz = cptrwz + Llevels - 1 # Move it along a bit
}
}
@@ -881,10 +883,10 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
}
} else {
- wz = c(w) * dcump.deta^2 * (1/mu.use[,1:M] + 1/mu.use[,-1])
+ wz = c(w) * dcump.deta^2 * (1/mu.use[, 1:M] + 1/mu.use[,-1])
if (M > 1)
wz = cbind(wz, -c(w) * dcump.deta[,-M] *
- dcump.deta[,2:M] / mu.use[,2:M])
+ dcump.deta[, 2:M] / mu.use[, 2:M])
}
wz
}), list( .earg = earg, .link = link, .mv = mv ))))
@@ -930,8 +932,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
eval(process.categorical.data.vgam)
M = ncol(y) - 1
mynames = if ( .reverse )
- paste("P[Y=", 1:M, "]/P[Y=", 2:(M+1), "]", sep = "") else
- paste("P[Y=", 2:(M+1), "]/P[Y=", 1:M, "]", sep = "")
+ paste("P[Y = ", 1:M, "]/P[Y = ", 2:(M+1), "]", sep = "") else
+ paste("P[Y = ", 2:(M+1), "]/P[Y = ", 1:M, "]", sep = "")
predictors.names = namesof(mynames, .link, short = TRUE, earg = .earg)
y.names = paste("mu", 1:(M+1), sep = "")
@@ -968,8 +970,8 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
}), list( .earg = earg, .link = link, .reverse = reverse ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
M = ncol(mu) - 1
- theta2eta(if ( .reverse ) mu[,1:M] / mu[,-1] else
- mu[,-1] / mu[,1:M], .link, earg = .earg )
+ theta2eta(if ( .reverse ) mu[, 1:M] / mu[,-1] else
+ mu[,-1] / mu[, 1:M], .link, earg = .earg )
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
@@ -986,7 +988,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("acat", "vcategorical"),
deriv = eval(substitute(expression({
@@ -996,7 +998,7 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
dzeta.deta = dtheta.deta(zeta, .link, earg = .earg )
if ( .reverse ) {
cumy = tapplymat1(y, "cumsum")
- c(w) * dzeta.deta * (cumy[,1:M] / zeta - score)
+ c(w) * dzeta.deta * (cumy[, 1:M] / zeta - score)
} else {
ccumy = tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
c(w) * dzeta.deta * (ccumy[,-1] / zeta - score)
@@ -1014,10 +1016,10 @@ vglm.vcategorical.control = function(maxit=30, trace=FALSE, panic = TRUE, ...)
score[,kay]) * dzeta.deta[,jay] * dzeta.deta[,kay]
if ( .reverse ) {
cump = tapplymat1(mu, "cumsum")
- wz[,1:M] = (cump[,1:M] / zeta^2 - score^2) * dzeta.deta^2
+ wz[, 1:M] = (cump[, 1:M] / zeta^2 - score^2) * dzeta.deta^2
} else {
ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[, ncol(mu):1]
- wz[,1:M] = (ccump[,-1] / zeta^2 - score^2) * dzeta.deta^2
+ wz[, 1:M] = (ccump[,-1] / zeta^2 - score^2) * dzeta.deta^2
}
c(w) * wz
}), list( .earg = earg, .link = link, .reverse = reverse ))))
@@ -1052,16 +1054,16 @@ acat.deriv = function(zeta, reverse, M, n)
- brat = function(refgp="last",
+ brat = function(refgp = "last",
refvalue = 1,
init.alpha = 1)
{
- if (!is.Numeric(init.alpha, posit = TRUE))
+ if (!is.Numeric(init.alpha, positive = TRUE))
stop("'init.alpha' must contain positive values only")
- if (!is.Numeric(refvalue, allow=1, posit = TRUE))
+ if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
stop("'refvalue' must be a single positive value")
if (!is.character(refgp) &&
- !is.Numeric(refgp, allow=1, integer = TRUE, posit = TRUE))
+ !is.Numeric(refgp, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("'refgp' must be a single positive integer")
new("vglmff",
@@ -1081,8 +1083,8 @@ acat.deriv = function(zeta, reverse, M, n)
refgp = .refgp
if (!intercept.only)
warning("this function only works with intercept-only models")
- extra$ybrat.indices = .brat.indices(NCo=M+1, are.ties=FALSE)
- uindex = if ( .refgp =="last") 1:M else (1:(M+1))[-( .refgp ) ]
+ extra$ybrat.indices = .brat.indices(NCo=M+1, are.ties = FALSE)
+ uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
predictors.names=namesof(paste("alpha",uindex,sep = ""),"loge",short = TRUE)
}), list( .refgp = refgp, .init.alpha=init.alpha ))),
@@ -1120,12 +1122,12 @@ acat.deriv = function(zeta, reverse, M, n)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("brat"),
deriv = eval(substitute(expression({
ans = NULL
- uindex = if ( .refgp =="last") 1:M else (1:(M+1))[-( .refgp ) ]
+ uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
eta = as.matrix(eta) # in case M=1
for (ii in 1:nrow(eta)) {
alpha = .brat.alpha(eta2theta(eta[ii,], "loge", earg = list()),
@@ -1154,7 +1156,7 @@ acat.deriv = function(zeta, reverse, M, n)
alpha[uindex] / (alpha[aa] + alpha[uindex])^2
}
if (M > 1) {
- ind5 = iam(1,1,M, both = TRUE, diag=FALSE)
+ ind5 = iam(1,1,M, both = TRUE, diag = FALSE)
wz[ii,(M+1):ncol(wz)] =
-(ymat[cbind(uindex[ind5$row],uindex[ind5$col])] +
ymat[cbind(uindex[ind5$col],uindex[ind5$row])]) *
@@ -1170,20 +1172,23 @@ acat.deriv = function(zeta, reverse, M, n)
-bratt = function(refgp="last",
+bratt = function(refgp = "last",
refvalue = 1,
init.alpha = 1,
i0 = 0.01)
{
- if (!is.Numeric(i0, allow=1, positi = TRUE))
- stop("'i0' must be a single positive value")
- if (!is.Numeric(init.alpha, positi = TRUE))
- stop("'init.alpha' must contain positive values only")
- if (!is.Numeric(refvalue, allow=1, positi = TRUE))
- stop("'refvalue' must be a single positive value")
+ if (!is.Numeric(i0, allowable.length = 1, positive = TRUE))
+ stop("'i0' must be a single positive value")
+ if (!is.Numeric(init.alpha, positive = TRUE))
+ stop("'init.alpha' must contain positive values only")
+ if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
+ stop("'refvalue' must be a single positive value")
if (!is.character(refgp) &&
- !is.Numeric(refgp, allow=1, integer = TRUE, positi = TRUE))
- stop("'refgp' must be a single positive integer")
+ !is.Numeric(refgp, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("'refgp' must be a single positive integer")
+
+
new("vglmff",
blurb = c(paste("Bradley-Terry model (with ties)\n\n"),
"Links: ",
@@ -1191,7 +1196,8 @@ bratt = function(refgp="last",
initialize = eval(substitute(expression({
try.index = 1:400
M = (1:length(try.index))[(try.index*(try.index-1)) == ncol(y)]
- if (!is.Numeric(M, allow=1, integ = TRUE)) stop("cannot determine 'M'")
+ if (!is.Numeric(M, allowable.length = 1, integer.valued = TRUE))
+ stop("cannot determine 'M'")
NCo = M # number of contestants
are.ties = attr(y, "are.ties") # If Brat() was used
@@ -1213,10 +1219,10 @@ bratt = function(refgp="last",
if (!intercept.only)
warning("this function only works with intercept-only models")
extra$ties = ties # Flat (1-row) matrix
- extra$ybrat.indices = .brat.indices(NCo=NCo, are.ties=FALSE)
+ extra$ybrat.indices = .brat.indices(NCo=NCo, are.ties = FALSE)
extra$tbrat.indices = .brat.indices(NCo=NCo, are.ties = TRUE) # unused
extra$dnties = dimnames(ties)
- uindex = if (refgp=="last") 1:(NCo-1) else (1:(NCo))[-refgp ]
+ uindex = if (refgp == "last") 1:(NCo-1) else (1:(NCo))[-refgp ]
predictors.names=c(
namesof(paste("alpha",uindex,sep = ""),"loge",short = TRUE),
@@ -1259,7 +1265,7 @@ bratt = function(refgp="last",
ans = NULL
ties = extra$ties
NCo = M
- uindex = if ( .refgp =="last") 1:(M-1) else (1:(M))[-( .refgp )]
+ uindex = if ( .refgp == "last") 1:(M-1) else (1:(M))[-( .refgp )]
eta = as.matrix(eta)
for (ii in 1:nrow(eta)) {
alpha = .brat.alpha(eta2theta(eta[ii,-M],"loge"), .refvalue, .refgp)
@@ -1311,10 +1317,10 @@ bratt = function(refgp="last",
}
}
if (NCo > 2) {
- ind5 = iam(1,1, M=NCo, both = TRUE, diag=FALSE)
+ ind5 = iam(1,1, M=NCo, both = TRUE, diag = FALSE)
alphajunk = c(alpha, junk=NA)
mat4 = cbind(uindex[ind5$row],uindex[ind5$col])
- wz[ii,(M+1):ncol(wz)] = -(ymat[mat4] + ymat[mat4[,2:1]] +
+ wz[ii,(M+1):ncol(wz)] = -(ymat[mat4] + ymat[mat4[, 2:1]] +
tmat[mat4]) * alphajunk[uindex[ind5$col]] *
alphajunk[uindex[ind5$row]] / (alpha0 +
alphajunk[uindex[ind5$row]] + alphajunk[uindex[ind5$col]])^2
@@ -1334,21 +1340,22 @@ bratt = function(refgp="last",
.brat.alpha = function(vec, value, posn) {
- if (is.character(posn))
- if (posn!="last")
- stop("can only handle \"last\"") else return(c(vec, value))
- c(if (posn==1) NULL else vec[1:(posn-1)], value,
- if (posn==length(vec)+1) NULL else vec[posn:length(vec)])
+ if (is.character(posn))
+ if (posn != "last")
+ stop("can only handle \"last\"") else return(c(vec, value))
+ c(if (posn == 1) NULL else vec[1:(posn-1)], value,
+ if (posn == length(vec) + 1) NULL else vec[posn:length(vec)])
}
-.brat.indices = function(NCo, are.ties=FALSE) {
- if (!is.Numeric(NCo, allow=1, integ = TRUE) || NCo < 2)
- stop("bad input for 'NCo'")
- m = diag(NCo)
- if (are.ties) {
- cbind(rindex=row(m)[col(m) < row(m)], cindex=col(m)[col(m) < row(m)])
- } else
- cbind(rindex=row(m)[col(m) != row(m)], cindex=col(m)[col(m) != row(m)])
+
+.brat.indices = function(NCo, are.ties = FALSE) {
+ if (!is.Numeric(NCo, allowable.length = 1, integer.valued = TRUE) || NCo < 2)
+ stop("bad input for 'NCo'")
+ m = diag(NCo)
+ if (are.ties) {
+ cbind(rindex=row(m)[col(m) < row(m)], cindex=col(m)[col(m) < row(m)])
+ } else
+ cbind(rindex=row(m)[col(m) != row(m)], cindex=col(m)[col(m) != row(m)])
}
@@ -1377,8 +1384,8 @@ Brat = function(mat, ties=0*mat, string=c(" > "," == ")) {
dt = as.data.frame.table(ties)
dm = dm[!is.na(dm$Freq),]
dt = dt[!is.na(dt$Freq),]
- usethis1 = paste(dm[,1], string[1], dm[,2], sep = "")
- usethis2 = paste(dm[,1], string[2], dm[,2], sep = "")
+ usethis1 = paste(dm[, 1], string[1], dm[, 2], sep = "")
+ usethis2 = paste(dm[, 1], string[2], dm[, 2], sep = "")
ans = rbind(ans, matrix(dm$Freq, nrow=1))
ans.ties = rbind(ans.ties, matrix(dt$Freq, nrow=1))
}
@@ -1424,44 +1431,44 @@ InverseBrat = function(yvec, NCo =
-tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
+tapplymat1 = function(mat, function.arg = c("cumsum", "diff", "cumprod"))
{
- if (!missing(function.arg))
- function.arg = as.character(substitute(function.arg))
- function.arg = match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
+ if (!missing(function.arg))
+ function.arg = as.character(substitute(function.arg))
+ function.arg = match.arg(function.arg, c("cumsum", "diff", "cumprod"))[1]
- type = switch(function.arg, cumsum=1, diff=2, cumprod=3,
- stop("function.arg not matched"))
+ type = switch(function.arg, cumsum = 1, diff = 2, cumprod = 3,
+ stop("function.arg not matched"))
- if (!is.matrix(mat))
- mat = as.matrix(mat)
- nr = nrow(mat)
- nc = ncol(mat)
- fred = dotC(name="tapplymat1", mat=as.double(mat),
- as.integer(nr), as.integer(nc), as.integer(type))
+ if (!is.matrix(mat))
+ mat = as.matrix(mat)
+ NR = nrow(mat)
+ NC = ncol(mat)
+ fred = dotC(name = "tapplymat1", mat=as.double(mat),
+ as.integer(NR), as.integer(NC), as.integer(type))
- dim(fred$mat) = c(nr, nc)
- dimnames(fred$mat) = dimnames(mat)
- switch(function.arg,
- cumsum =fred$mat,
- diff =fred$mat[,-1, drop = FALSE],
- cumprod=fred$mat)
+ dim(fred$mat) = c(NR, NC)
+ dimnames(fred$mat) = dimnames(mat)
+ switch(function.arg,
+ cumsum =fred$mat,
+ diff =fred$mat[,-1, drop = FALSE],
+ cumprod=fred$mat)
}
ordpoisson = function(cutpoints,
- countdata=FALSE, NOS = NULL, Levels = NULL,
+ countdata = FALSE, NOS = NULL, Levels = NULL,
init.mu = NULL, parallel = FALSE, zero = NULL,
link = "loge", earg = list()) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
fcutpoints = cutpoints[is.finite(cutpoints)]
- if (!is.Numeric(fcutpoints, integ = TRUE) || any(fcutpoints < 0))
+ if (!is.Numeric(fcutpoints, integer.valued = TRUE) || any(fcutpoints < 0))
stop("'cutpoints' must have non-negative integer or Inf values only")
if (is.finite(cutpoints[length(cutpoints)]))
cutpoints = c(cutpoints, Inf)
@@ -1469,9 +1476,9 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
if (!is.logical(countdata) || length(countdata) != 1)
stop("argument 'countdata' must be a single logical")
if (countdata) {
- if (!is.Numeric(NOS, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(NOS, integer.valued = TRUE, positive = TRUE))
stop("'NOS' must have integer values only")
- if (!is.Numeric(Levels, integ = TRUE, posit = TRUE) || any(Levels < 2))
+ if (!is.Numeric(Levels, integer.valued = TRUE, positive = TRUE) || any(Levels < 2))
stop("'Levels' must have integer values (>= 2) only")
Levels = rep(Levels, length=NOS)
}
@@ -1581,7 +1588,7 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
resmat = matrix(0, n, M)
dl.dprob = y / probs.use
dmu.deta = dtheta.deta(mu, .link, earg=.earg)
- dprob.dmu = ordpoissonProbs(extra, mu, deriv=1)
+ dprob.dmu = ordpoissonProbs(extra, mu, deriv = 1)
cptr = 1
for (iii in 1:NOS) {
for (kkk in 1:Levels[iii]) {
@@ -1604,46 +1611,48 @@ tapplymat1 = function(mat, function.arg=c("cumsum", "diff", "cumprod"))
}
wz = c(w) * d2l.dmu2 * dmu.deta^2
wz
- }), list( .earg = earg, .link = link, .countdata=countdata ))))
+ }), list( .earg = earg, .link = link, .countdata = countdata ))))
}
-ordpoissonProbs = function(extra, mu, deriv=0) {
- cp.vector = extra$cutpoints
- NOS = extra$NOS
+
+
+ordpoissonProbs = function(extra, mu, deriv = 0) {
+ cp.vector = extra$cutpoints
+ NOS = extra$NOS
+ if (deriv == 1) {
+ dprob.dmu = matrix(0, extra$n, extra$ncoly)
+ } else {
+ probs = matrix(0, extra$n, extra$ncoly)
+ }
+ mu = cbind(mu)
+ cptr = 1
+ for (iii in 1:NOS) {
if (deriv == 1) {
- dprob.dmu = matrix(0, extra$n, extra$ncoly)
+ dprob.dmu[,cptr] = -dpois(x = cp.vector[cptr], lambda = mu[,iii])
} else {
- probs = matrix(0, extra$n, extra$ncoly)
+ probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii])
}
- mu = cbind(mu)
- cptr = 1
- for (iii in 1:NOS) {
- if (deriv == 1) {
- dprob.dmu[,cptr] = -dpois(x=cp.vector[cptr], lamb=mu[,iii])
- } else {
- probs[,cptr] = ppois(q=cp.vector[cptr], lambda=mu[,iii])
- }
- cptr = cptr + 1
- while(is.finite(cp.vector[cptr])) {
- if (deriv == 1) {
- dprob.dmu[,cptr] = dpois(x=cp.vector[cptr-1], lamb=mu[,iii]) -
- dpois(x=cp.vector[cptr], lambda=mu[,iii])
- } else {
- probs[,cptr] = ppois(q=cp.vector[cptr], lambda=mu[,iii]) -
- ppois(q=cp.vector[cptr-1], lambda=mu[,iii])
- }
- cptr = cptr + 1
- }
- if (deriv == 1) {
- dprob.dmu[,cptr] = dpois(x=cp.vector[cptr-1], lamb=mu[,iii]) -
- dpois(x=cp.vector[cptr], lambda=mu[,iii])
- } else {
- probs[,cptr] = ppois(q=cp.vector[cptr], lamb=mu[,iii]) -
- ppois(q=cp.vector[cptr-1], lambda=mu[,iii])
- }
- cptr = cptr + 1
+ cptr = cptr + 1
+ while(is.finite(cp.vector[cptr])) {
+ if (deriv == 1) {
+ dprob.dmu[,cptr] = dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
+ dpois(x = cp.vector[cptr], lambda = mu[,iii])
+ } else {
+ probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii]) -
+ ppois(q = cp.vector[cptr-1], lambda = mu[,iii])
+ }
+ cptr = cptr + 1
}
- if (deriv == 1) dprob.dmu else probs
+ if (deriv == 1) {
+ dprob.dmu[,cptr] = dpois(x = cp.vector[cptr-1], lambda = mu[,iii]) -
+ dpois(x = cp.vector[cptr], lambda = mu[,iii])
+ } else {
+ probs[,cptr] = ppois(q = cp.vector[cptr], lambda = mu[,iii]) -
+ ppois(q = cp.vector[cptr-1], lambda = mu[,iii])
+ }
+ cptr = cptr + 1
+ }
+ if (deriv == 1) dprob.dmu else probs
}
@@ -1653,7 +1662,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
if (FALSE)
scumulative = function(link = "logit", earg = list(),
- lscale="loge", escale = list(),
+ lscale = "loge", escale = list(),
parallel = FALSE, sparallel = TRUE, reverse = FALSE,
iscale = 1)
{
@@ -1664,7 +1673,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (!is.list(escale)) escale = list()
- if (!is.Numeric(iscale, posit = TRUE))
+ if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
@@ -1697,7 +1706,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
(constraints[[ii]])[interleave.VGAM(M, M=2),, drop = FALSE]
}), list( .parallel = parallel, .sparallel=sparallel ))),
deviance=eval(substitute(
- function(mu, y, w, residuals=FALSE, eta, extra = NULL) {
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
answer =
Deviance.categorical.data.vgam(mu=mu, y=y, w=w, residuals=residuals,
eta=eta, extra=extra)
@@ -1711,8 +1720,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
M = 2*(ncol(y)-1)
J = M / 2
extra$J = J
- mynames = if ( .reverse ) paste("P[Y>=",2:(1+J),"]", sep = "") else
- paste("P[Y<=",1:J,"]", sep = "")
+ mynames = if ( .reverse ) paste("P[Y> = ",2:(1+J),"]", sep = "") else
+ paste("P[Y< = ",1:J,"]", sep = "")
predictors.names = c(
namesof(mynames, .link, short = TRUE, earg = .earg),
namesof(paste("scale_", 1:J, sep = ""),
@@ -1729,8 +1738,8 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
linkinv = eval(substitute( function(eta, extra = NULL) {
J = extra$J
M = 2*J
- etamat1 = eta[,2*(1:J)-1, drop = FALSE]
- etamat2 = eta[,2*(1:J), drop = FALSE]
+ etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
+ etamat2 = eta[, 2*(1:J), drop = FALSE]
scalemat = eta2theta(etamat2, .lscale, earg = .escale)
fv.matrix =
if ( .reverse ) {
@@ -1738,7 +1747,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
} else {
cump = cbind(eta2theta(etamat1/scalemat, .link, earg = .earg), 1)
- cbind(cump[,1], tapplymat1(cump, "diff"))
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
}
if (length(extra$dimnamesy2))
dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
@@ -1766,7 +1775,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
J = ncol(as.matrix(mu)) - 1
M = 2 * J
answer = cbind(
- theta2eta(if ( .reverse ) 1-cump[,1:J] else cump[,1:J], .link,
+ theta2eta(if ( .reverse ) 1-cump[, 1:J] else cump[, 1:J], .link,
earg= .earg),
matrix(theta2eta( .iscale, .lscale, earg = .escale),
nrow(as.matrix(mu)), J, byrow = TRUE))
@@ -1789,7 +1798,7 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, docheck = FALSE))
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("scumulative", "vcategorical"),
deriv = eval(substitute(expression({
@@ -1798,15 +1807,15 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
J = extra$J
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
- etamat1 = eta[,2*(1:J)-1, drop = FALSE]
- etamat2 = eta[,2*(1:J), drop = FALSE]
+ etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
+ etamat2 = eta[, 2*(1:J), drop = FALSE]
scalemat = eta2theta(etamat2, .lscale, earg = .escale)
cump = eta2theta(etamat1 / scalemat, .link, earg = .earg)
dcump.deta = dtheta.deta(cump, .link, earg = .earg)
dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale)
dl.dcump = (if ( .reverse) -w else w) *
- (y[,1:J]/mu.use[,1:J] - y[,-1]/mu.use[,-1])
+ (y[, 1:J]/mu.use[, 1:J] - y[,-1]/mu.use[,-1])
dcump.dscale = -dcump.deta * etamat1 / scalemat^2
ans = cbind(dl.dcump * dcump.deta / scalemat,
dl.dcump * dcump.dscale * dscale.deta)
@@ -1819,25 +1828,25 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
wz = matrix(0, n, 2*(2*M-3))
- wz[,2*(1:J)-1] = if (ooz) c(w) * (dcump.deta / scalemat)^2 *
- (1/mu.use[,1:J] + 1/mu.use[,-1]) else 1
- wz[,2*(1:J)] = if (ooz) 1 else c(w) * (dcump.dscale * dscale.deta)^2 *
- (1/mu.use[,1:J] + 1/mu.use[,-1])
+ wz[, 2*(1:J)-1] = if (ooz) c(w) * (dcump.deta / scalemat)^2 *
+ (1/mu.use[, 1:J] + 1/mu.use[,-1]) else 1
+ wz[, 2*(1:J)] = if (ooz) 1 else c(w) * (dcump.dscale * dscale.deta)^2 *
+ (1/mu.use[, 1:J] + 1/mu.use[,-1])
wz0 = c(w) * (dcump.deta / scalemat) *
(dcump.dscale * dscale.deta) *
- (1/mu.use[,1:J] + 1/mu.use[,-1])
+ (1/mu.use[, 1:J] + 1/mu.use[,-1])
wz0 = as.matrix(wz0)
for (ii in 1:J)
wz[,iam(2*ii-1,2*ii,M=M)] = if (ooz) wz0[,ii] else 0
if (J > 1) {
wz0 = -c(w) * (dcump.deta[,-J] / scalemat[,-J]) *
- (dcump.deta[,-1] / scalemat[,-1]) / mu.use[,2:J]
+ (dcump.deta[,-1] / scalemat[,-1]) / mu.use[, 2:J]
wz0 = as.matrix(wz0) # Just in case J=2
for (ii in 1:(J-1))
wz[,iam(2*ii-1,2*ii+1,M=M)] = if (ooz) wz0[,ii] else 0
wz0 = -c(w) * (dcump.dscale[,-1] * dscale.deta[,-1]) *
- (dcump.dscale[,-J] * dscale.deta[,-J]) / mu.use[,2:J]
+ (dcump.dscale[,-J] * dscale.deta[,-J]) / mu.use[, 2:J]
wz0 = as.matrix(wz0)
for (ii in 1:(J-1))
wz[,iam(2*ii,2*ii+2,M=M)] = if (ooz) wz0[,ii] else 0
@@ -1845,12 +1854,12 @@ ordpoissonProbs = function(extra, mu, deriv=0) {
wz0 = -c(w) * (dcump.deta[,-J] / scalemat[,-J]) *
- (dcump.dscale[,-1] * dscale.deta[,-1]) / mu.use[,2:J]
+ (dcump.dscale[,-1] * dscale.deta[,-1]) / mu.use[, 2:J]
wz0 = as.matrix(wz0)
for (ii in 1:(J-1))
wz[,iam(2*ii-1,2*ii+2,M=M)] = if (ooz) wz0[,ii] else 0
wz0 = -c(w) * (dcump.deta[,-1] / scalemat[,-1]) *
- (dcump.dscale[,-J] * dscale.deta[,-J]) / mu.use[,2:J]
+ (dcump.dscale[,-J] * dscale.deta[,-J]) / mu.use[, 2:J]
wz0 = as.matrix(wz0)
for (ii in 1:(J-1))
wz[,iam(2*ii,2*ii+1,M=M)] = if (ooz) wz0[,ii] else 0
@@ -1892,7 +1901,7 @@ margeff = function(object, subset = NULL) {
if (model.multinomial) {
rlev = object at misc$refLevel
- cfit = coefvlm(object, matrix = TRUE)
+ cfit = coefvlm(object, matrix.out = TRUE)
B = if (!length(rlev)) {
cbind(cfit, 0)
} else {
@@ -1901,7 +1910,7 @@ margeff = function(object, subset = NULL) {
} else if (rlev == 1) {
cbind(0, cfit)
} else {
- cbind(cfit[,1:(rlev-1)], 0, cfit[,rlev:M])
+ cbind(cfit[, 1:(rlev-1)], 0, cfit[,rlev:M])
}
}
ppp = nrow(B)
@@ -1954,7 +1963,7 @@ margeff = function(object, subset = NULL) {
reverse = object at misc$reverse
linkfunctions = object at misc$link
all.eargs = object at misc$earg
- B = cfit = coefvlm(object, matrix = TRUE)
+ B = cfit = coefvlm(object, matrix.out = TRUE)
ppp = nrow(B)
hdot = lpmat = kronecker(predict(object), matrix(1, ppp, 1))
@@ -2018,7 +2027,7 @@ prplot = function(object,
control = prplot.control(...)
- object = plotvgam(object, plot.arg=FALSE, raw=FALSE) # , ...
+ object = plotvgam(object, plot.arg = FALSE, raw = FALSE) # , ...
if (length(names(object at preplot)) != 1)
stop("object needs to have only one term")
@@ -2033,16 +2042,16 @@ prplot = function(object,
use.y[,ii] = eta2theta(use.y[,ii], link=object at misc$link[[ii]],
earg=object at misc$earg[[ii]])
}
- if (ncol(use.y) != MM) use.y = use.y[,1:MM, drop = FALSE]
+ if (ncol(use.y) != MM) use.y = use.y[, 1:MM, drop = FALSE]
use.x = (object at preplot[[1]])$x
myxlab = if (length(control$xlab)) control$xlab else (object at preplot[[1]])$xlab
- mymain = if (MM <= 3) paste(object at misc$parameters, collapse=", ") else
- paste(object at misc$parameters[c(1,MM)], collapse=",...,")
+ mymain = if (MM <= 3) paste(object at misc$parameters, collapse = ", ") else
+ paste(object at misc$parameters[c(1, MM)], collapse = ",...,")
if (length(control$main)) mymain = control$main
if (length(control$ylab)) myylab = control$ylab
- matplot(use.x, use.y, type="l", xlab=myxlab, ylab=myylab,
+ matplot(use.x, use.y, type = "l", xlab=myxlab, ylab=myylab,
lty=control$lty, col=control$col, las=control$las,
xlim=if (is.Numeric(control$xlim)) control$xlim else range(use.x),
ylim=if (is.Numeric(control$ylim)) control$ylim else range(use.y),
@@ -2056,7 +2065,7 @@ prplot = function(object,
- prplot.control = function(xlab = NULL, ylab="Probability", main = NULL,
+ prplot.control = function(xlab = NULL, ylab = "Probability", main = NULL,
xlim = NULL, ylim = NULL,
lty=par()$lty,
col=par()$col,
@@ -2064,7 +2073,7 @@ prplot = function(object,
lwd=par()$lwd,
rlwd=par()$lwd,
las=par()$las,
- rug.arg =FALSE,
+ rug.arg = FALSE,
...) {
list(xlab=xlab, ylab=ylab,
diff --git a/R/family.censored.R b/R/family.censored.R
index 230b721..327f388 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -95,7 +95,7 @@
} else {
sum(w[cen0] * dpois(y[cen0,1], mu[cen0], log = TRUE)) +
sum(w[cenU] * log1p(-ppois(y[cenU,1] - 1, mu[cenU]))) +
- sum(w[cenL] * ppois(y[cenL,1] - 1, mu[cenL], log = TRUE)) +
+ sum(w[cenL] * ppois(y[cenL,1] - 1, mu[cenL], log.p = TRUE)) +
sum(w[cenI] * log(ppois(y[cenI,2], mu[cenI]) -
ppois(y[cenI,1], mu[cenI])))
}
@@ -110,19 +110,19 @@
dl.dlambda = (y[,1] - lambda)/lambda # uncensored
yllim = yulim = y[,1] # uncensored
if (any(cenU)) {
- yllim[cenU] = y[cenU,1]
- densm1 = dpois(yllim-1, lambda)
- queue = ppois(yllim-1, lambda, lower = FALSE) # Right tail probability
- dl.dlambda[cenU] = densm1[cenU] / queue[cenU]
+ yllim[cenU] = y[cenU,1]
+ densm1 = dpois(yllim-1, lambda)
+ queue = ppois(yllim-1, lambda, lower.tail = FALSE)
+ dl.dlambda[cenU] = densm1[cenU] / queue[cenU]
}
if (any(cenL)) {
- yulim[cenL] = y[cenL,1]-1
+ yulim[cenL] = y[cenL,1] - 1
densm0 = dpois(yulim, lambda)
Queue = ppois(yulim, lambda) # Left tail probability
dl.dlambda[cenL] = -densm0[cenL] / Queue[cenL]
}
if (any(cenI)) {
- yllim[cenI] = y[cenI,1]+1
+ yllim[cenI] = y[cenI,1] + 1
yulim[cenI] = y[cenI,2]
Queue1 = ppois(yllim-1, lambda)
Queue2 = ppois(yulim, lambda)
@@ -166,7 +166,7 @@ if (FALSE)
cexpon =
ecexpon = function(link = "loge", location = 0)
{
- if (!is.Numeric(location, allow = 1))
+ if (!is.Numeric(location, allowable.length = 1))
stop("bad input for 'location'")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -313,7 +313,7 @@ if (FALSE)
lmu = as.character(substitute(lmu))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allow = 1, integer = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
if (!is.list(emu)) emu = list()
@@ -572,15 +572,16 @@ if (FALSE)
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(imethod, allow = 1, integer = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
if (!is.list(eshape)) eshape = list()
if (!is.list(escale)) escale = list()
- if (!is.Numeric(nrfs, allow = 1) || nrfs < 0 || nrfs > 1)
+ if (!is.Numeric(nrfs, allowable.length = 1) || nrfs < 0 || nrfs > 1)
stop("bad input for argument 'nrfs'")
new("vglmff",
@@ -849,18 +850,17 @@ is.na.SurvS4 <- function(x) {
-print.SurvS4 <-
-function (x, quote = FALSE, ...)
-invisible(print(as.character.SurvS4(x), quote = quote, ...))
-setMethod("print", "SurvS4",
- function(x, ...)
- invisible(print.SurvS4(x, ...)))
+show.SurvS4 <- function (object)
+ print(as.character.SurvS4(object), quote = FALSE)
+
+
+
setMethod("show", "SurvS4",
function(object)
- invisible(print.SurvS4(object)))
+ show.SurvS4(object))
diff --git a/R/family.circular.R b/R/family.circular.R
index bb0e294..d1b9882 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -76,7 +76,7 @@ rcard = function(n, mu, rho, ...) {
stop("'mu' must be between 0 and 2*pi inclusive")
if (!is.Numeric(rho) || max(abs(rho) > 0.5))
stop("'rho' must be between -0.5 and 0.5 inclusive")
- if (!is.Numeric(n, positive = TRUE, integer = TRUE, allow=1))
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
stop("'n' must be a single positive integer")
mu = rep(mu, len=n)
rho = rep(rho, len=n)
@@ -108,7 +108,7 @@ cardioid.control <- function(save.weight = TRUE, ...)
stop("bad input for argument 'irho'")
if (!is.list(emu)) emu = list()
if (!is.list(erho)) erho = list()
- if (!is.Numeric(nsimEIM, allow=1, integ = TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
new("vglmff",
@@ -221,9 +221,9 @@ cardioid.control <- function(save.weight = TRUE, ...)
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow=1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(escale)) escale = list()
diff --git a/R/family.exp.R b/R/family.exp.R
index 6f00bdd..25980dc 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -19,7 +19,8 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
smallno = 0.10
if (any(min >= max))
stop("argument 'min' has values greater or equal to argument 'max'")
- if (!is.Numeric( Tol_nr, allow = 1, posit = TRUE) || Tol_nr > 0.10)
+ if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
+ Tol_nr > 0.10)
stop("argument 'Tol_nr' is not a single positive value, or is too large")
nrok = ppp >= vsmallno & ppp <= 1.0 - vsmallno & is.finite(ppp)
@@ -96,7 +97,7 @@ deunif <- function(x, min = 0, max = 1, log = FALSE) {
reunif <- function(n, min = 0, max = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
qeunif(runif(use.n), min = min, max = max)
}
@@ -107,7 +108,8 @@ reunif <- function(n, min = 0, max = 1) {
qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
ppp = p
- if (!is.Numeric( Tol_nr, allow = 1, posit = TRUE) || Tol_nr > 0.10)
+ if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
+ Tol_nr > 0.10)
stop("argument 'Tol_nr' is not a single ",
"positive value, or is too large")
nrok = is.finite(ppp)
@@ -176,7 +178,7 @@ denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
renorm <- function(n, mean = 0, sd = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
qenorm(runif(use.n), mean = mean, sd = sd)
}
@@ -190,8 +192,10 @@ renorm <- function(n, mean = 0, sd = 1) {
qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
ppp = p
vsmallno = sqrt(.Machine$double.eps)
- if (!is.Numeric( Tol_nr, allow = 1, posit = TRUE) || Tol_nr > 0.10)
- stop("argument 'Tol_nr' is not a single positive value, or is too large")
+ if (!is.Numeric( Tol_nr, allowable.length = 1, positive = TRUE) ||
+ Tol_nr > 0.10)
+ stop("argument 'Tol_nr' is not a single positive value, or ",
+ "is too large")
nrok = ppp >= vsmallno & is.finite(ppp)
@@ -253,7 +257,7 @@ deexp <- function(x, rate = 1, log = FALSE) {
eee = x * rate
if (log.arg) {
- ans = log(eee) - eee + 2.0 * log((1-y) - 2 * exp(-y)) + log(rate)
+ ans = log(eee) - eee + 2.0 * log((1-x) - 2 * exp(-x)) + log(rate)
} else {
gexp = function(y)
as.numeric(y >= 0) * y * exp(-y) / ((1-y) - 2 * exp(-y))^2
@@ -266,10 +270,11 @@ deexp <- function(x, rate = 1, log = FALSE) {
reexp <- function(n, rate = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow=1, posit=TRUE))
- stop("bad input for argument 'n'") else n
- qeexp(runif(use.n), rate = rate)
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ qeexp(runif(use.n), rate = rate)
}
@@ -354,7 +359,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale <- as.character(substitute(lscale))
if (length(ilocat) &&
- (!is.Numeric(ilocat, allow = 1, positive = TRUE)))
+ (!is.Numeric(ilocat, allowable.length = 1, positive = TRUE)))
stop("bad input for argument 'ilocation'")
if (length(iscale) && !is.Numeric(iscale))
stop("bad input for argument 'iscale'")
@@ -362,10 +367,11 @@ rkoenker <- function(n, location = 0, scale = 1) {
if (!is.list(elocat)) elocat = list()
if (!is.list(escale)) escale = list()
- if (!is.Numeric(percentile, posit = TRUE) ||
+ if (!is.Numeric(percentile, positive = TRUE) ||
any(percentile >= 100))
stop("bad input for argument 'percentile'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("'imethod' must be 1 or 2")
diff --git a/R/family.extremes.R b/R/family.extremes.R
index bbc4d2d..6b397ff 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,123 +13,135 @@
rgev <- function(n, location = 0, scale = 1, shape = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
-
- if (!is.Numeric(location))
- stop("bad input for argument argument 'location'")
- if (!is.Numeric(shape))
- stop("bad input for argument argument 'shape'")
-
- ans = numeric(use.n)
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if (use.n - nscase)
- ans[!scase] = location[!scase] + scale[!scase] *
- ((-log(runif(use.n - nscase)))^(-shape[!scase]) -1) / shape[!scase]
- if (nscase)
- ans[scase] = rgumbel(nscase, location[scase], scale[scase])
- ans[scale <= 0] = NaN
- ans
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (!is.Numeric(location))
+ stop("bad input for argument argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument argument 'shape'")
+
+ ans = numeric(use.n)
+ shape = rep(shape, length.out = use.n);
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n)
+
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if (use.n - nscase)
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(runif(use.n - nscase)))^(-shape[!scase]) -1) / shape[!scase]
+ if (nscase)
+ ans[scase] = rgumbel(nscase, location[scase], scale[scase])
+ ans[scale <= 0] = NaN
+ ans
}
-dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
- tolshape0 = sqrt(.Machine$double.eps),
- oobounds.log = -Inf, giveWarning = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- if (oobounds.log > 0)
- stop("bad input for argument 'oobounds.log'")
-
- if (!is.Numeric(tolshape0, allow = 1, posit = TRUE))
- stop("bad input for argument 'tolshape0'")
- use.n = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n);
- x = rep(x, len = use.n)
-
- logdensity = rep(log(0), len = use.n)
- scase = abs(shape) < tolshape0
- nscase = sum(scase)
- if (use.n - nscase) {
- zedd = 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
- xok = (!scase) & (zedd > 0)
- logdensity[xok] = -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
- (1 + 1/shape[xok]) * log(zedd[xok])
- outofbounds = (!scase) & (zedd <= 0)
- if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
- }
- }
- if (nscase) {
- logdensity[scase] = dgumbel(x[scase], loc=location[scase],
- sc=scale[scase], log = TRUE)
+ dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
+ tolshape0 = sqrt(.Machine$double.eps),
+ oobounds.log = -Inf, giveWarning = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ if (oobounds.log > 0)
+ stop("bad input for argument 'oobounds.log'")
+
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'tolshape0'")
+
+ use.n = max(length(x), length(location), length(scale), length(shape))
+ shape = rep(shape, length.out = use.n)
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n);
+ x = rep(x, length.out = use.n)
+
+ logdensity = rep(log(0), length.out = use.n)
+ scase = abs(shape) < tolshape0
+ nscase = sum(scase)
+ if (use.n - nscase) {
+ zedd = 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
+ xok = (!scase) & (zedd > 0)
+ logdensity[xok] = -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
+ (1 + 1/shape[xok]) * log(zedd[xok])
+ outofbounds = (!scase) & (zedd <= 0)
+ if (any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if (giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
+ }
+ if (nscase) {
+ logdensity[scase] = dgumbel(x[scase], location = location[scase],
+ scale = scale[scase], log = TRUE)
+ }
- logdensity[scale <= 0] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ logdensity[scale <= 0] = NaN
+ if (log.arg) logdensity else exp(logdensity)
}
pgev <- function(q, location = 0, scale = 1, shape = 0) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(location))
- stop("bad input for argument 'location'")
- if (!is.Numeric(shape))
- stop("bad input for argument 'shape'")
-
- use.n = max(length(q), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n); q = rep(q-location, len = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if (use.n - nscase) {
- zedd = pmax(0,(1+shape*q/scale))
- ans[!scase] = exp(-zedd[!scase]^(-1/shape[!scase]))
- }
- if (nscase)
- ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
- ans[scale <= 0] = NaN
- ans
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(location))
+ stop("bad input for argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
+
+ use.n = max(length(q), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, length.out = use.n)
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n)
+ q = rep(q - location, length.out = use.n)
+
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if (use.n - nscase) {
+ zedd = pmax(0, (1 + shape * q / scale))
+ ans[!scase] = exp(-zedd[!scase]^(-1 / shape[!scase]))
+ }
+ if (nscase) {
+ ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
+ }
+ ans[scale <= 0] = NaN
+ ans
}
qgev <- function(p, location = 0, scale = 1, shape = 0) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("0 < p < 1 is required")
- if (!is.Numeric(location))
- stop("bad input for argument 'location'")
- if (!is.Numeric(shape))
- stop("bad input for argument 'shape'")
-
- use.n = max(length(p), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n); p = rep(p, len = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
- if (use.n - nscase) {
- ans[!scase] = location[!scase] + scale[!scase] *
- ((-log(p[!scase]))^(-shape[!scase]) -1) / shape[!scase]
- }
- if (nscase)
- ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
- ans[scale <= 0] = NaN
- ans
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("0 < p < 1 is required")
+ if (!is.Numeric(location))
+ stop("bad input for argument 'location'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
+
+ use.n = max(length(p), length(location), length(scale), length(shape))
+ ans = numeric(use.n)
+ shape = rep(shape, length.out = use.n)
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n);
+ p = rep(p, length.out = use.n)
+
+ scase = abs(shape) < sqrt(.Machine$double.eps)
+ nscase = sum(scase)
+ if (use.n - nscase) {
+ ans[!scase] = location[!scase] + scale[!scase] *
+ ((-log(p[!scase]))^(-shape[!scase]) - 1) / shape[!scase]
+ }
+ if (nscase)
+ ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
+ ans[scale <= 0] = NaN
+ ans
}
@@ -155,7 +167,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
stop("bad input for argument 'giveWarning'")
mean = FALSE
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
@@ -165,18 +177,18 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
lshape = as.character(substitute(lshape))
if (!mean && length(percentiles) &&
- (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, positive = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(imethod, allow = 1, posit = TRUE, integer = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (!is.Numeric(gshape, allow = 2) || gshape[1] >= gshape[2])
+ if (!is.Numeric(gshape, allowable.length = 2) || gshape[1] >= gshape[2])
stop("bad input for argument 'gshape'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
@@ -209,27 +221,32 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
extra$percentiles = .percentiles
if (!length(etastart)) {
- init.sig= if (length( .iscale)) rep( .iscale, len = nrow(y)) else NULL
- init.xi = if (length( .ishape)) rep( .ishape, len = nrow(y)) else NULL
- eshape = .eshape
- if ( .lshape == "elogit" && length(init.xi) &&
- (any(init.xi <= eshape$min | init.xi >= eshape$max)))
- stop("bad input for argument 'eshape'")
- if ( .imethod == 1) {
+ init.sig = if (length( .iscale))
+ rep( .iscale, length.out = nrow(y)) else NULL
+ init.xi = if (length( .ishape))
+ rep( .ishape, length.out = nrow(y)) else NULL
+ eshape = .eshape
+
+ if ( .lshape == "elogit" && length(init.xi) &&
+ (any(init.xi <= eshape$min |
+ init.xi >= eshape$max)))
+ stop("bad input for argument 'eshape'")
+
+ if ( .imethod == 1) {
nvector = 4:10 # Arbitrary; could be made an argument
ynvector = quantile(y[, 1], probs = 1-1/nvector)
objecFunction = -Inf # Actually the log-likelihood
est.sigma = !length(init.sig)
gshape = .gshape
temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], len = 12)
+ seq(gshape[1], gshape[2], length.out = 12)
for(xi.try in temp234) {
xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
sigmaTry = if (est.sigma)
- rep(fit0$coef["X"], len = nrow(y)) else init.sig
- muTry = rep(fit0$coef["Intercept"], len = nrow(y))
+ rep(fit0$coef["X"], length.out = nrow(y)) else init.sig
+ muTry = rep(fit0$coef["Intercept"], length.out = nrow(y))
llTry = egev(giveWarning=
FALSE)@loglikelihood(mu = NULL, y=y[, 1], w=w,
residuals = FALSE,
@@ -239,19 +256,19 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
if (llTry >= objecFunction) {
if (est.sigma)
init.sig = sigmaTry
- init.mu = rep(muTry, len = nrow(y))
+ init.mu = rep(muTry, length.out = nrow(y))
objecFunction = llTry
bestxi = xi.try
}
}
if (!length(init.xi))
- init.xi = rep(bestxi, len = nrow(y))
+ init.xi = rep(bestxi, length.out = nrow(y))
} else {
- init.xi = rep(0.05, len = nrow(y))
+ init.xi = rep(0.05, length.out = nrow(y))
if (!length(init.sig))
- init.sig = rep(sqrt(6 * var(y[, 1]))/pi, len = nrow(y))
+ init.sig = rep(sqrt(6 * var(y[, 1]))/pi, length.out = nrow(y))
EulerM = -digamma(1)
- init.mu = rep(median(y[, 1]) - EulerM*init.sig, len = nrow(y))
+ init.mu = rep(median(y[, 1]) - EulerM*init.sig, length.out = nrow(y))
}
bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
@@ -444,8 +461,8 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
if (FALSE ) {
wz[, iam(1, 2, M)] = 2 * r.vec / sigma^2
wz[, iam(2, 2, M)] = -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
- (4 * dgammadx(r.vec+1, der=1) -
- 3 * dgammadx(r.vec+1, der = 2)) / gamma(r.vec) # Not checked
+ (4 * dgammadx(r.vec+1, deriv.arg = 1) -
+ 3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec) # Not checked
}
}
wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dmu.deta^2
@@ -461,7 +478,7 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
-dgammadx <- function(x, deriv.arg=1) {
+dgammadx <- function(x, deriv.arg = 1) {
if (deriv.arg == 0) {
gamma(x)
} else if (deriv.arg == 1) {
@@ -469,14 +486,18 @@ dgammadx <- function(x, deriv.arg=1) {
} else if (deriv.arg == 2) {
gamma(x) * (trigamma(x) + digamma(x)^2)
} else if (deriv.arg == 3) {
- gamma(x) * (psigamma(x, der = 2) + 2 * digamma(x) * trigamma(x)) +
- dgammadx(x, der=1) * (trigamma(x) + digamma(x)^2)
+ gamma(x) * (psigamma(x, deriv = 2) +
+ 2 * digamma(x) * trigamma(x)) +
+ Recall(x, deriv.arg = 1) * (trigamma(x) + digamma(x)^2)
} else if (deriv.arg == 4) {
- dgammadx(x, der = 2) * (trigamma(x) + digamma(x)^2) +
- 2 * dgammadx(x, der=1) * (psigamma(x, der = 2) + 2*digamma(x) * trigamma(x)) +
- gamma(x) * (psigamma(x, der=3) + 2*trigamma(x)^2 +
- 2 * digamma(x) * psigamma(x, der = 2))
- } else stop("cannot handle deriv > 4")
+ Recall(x, deriv.arg = 2) * (trigamma(x) + digamma(x)^2) +
+ 2 * Recall(x, deriv.arg = 1) * (psigamma(x, deriv = 2) +
+ 2*digamma(x) * trigamma(x)) +
+ gamma(x) * (psigamma(x, deriv = 3) + 2*trigamma(x)^2 +
+ 2 * digamma(x) * psigamma(x, deriv = 2))
+ } else {
+ stop("cannot handle 'deriv' > 4")
+ }
}
@@ -497,7 +518,7 @@ dgammadx <- function(x, deriv.arg=1) {
{
if (!is.logical(giveWarning) || length(giveWarning) != 1)
stop("bad input for argument 'giveWarning'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale <- as.character(substitute(lscale))
@@ -505,19 +526,19 @@ dgammadx <- function(x, deriv.arg=1) {
llocation <- as.character(substitute(llocation))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape <- as.character(substitute(lshape))
- if (!is.Numeric(gshape, allow = 2) || gshape[1] >= gshape[2])
+ if (!is.Numeric(gshape, allowable.length = 2) || gshape[1] >= gshape[2])
stop("bad input for argument 'gshape'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, positive = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(imethod, allow = 1, posit = TRUE, integer = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
if (length(ishape) && !is.Numeric(ishape))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -540,8 +561,8 @@ dgammadx <- function(x, deriv.arg=1) {
if (ncol(as.matrix(y)) != 1)
stop("response must be a vector or one-column matrix")
if (!length(etastart)) {
- init.sig= if (length( .iscale)) rep( .iscale, len = length(y)) else NULL
- init.xi = if (length( .ishape)) rep( .ishape, len = length(y)) else NULL
+ init.sig= if (length( .iscale)) rep( .iscale, length.out = length(y)) else NULL
+ init.xi = if (length( .ishape)) rep( .ishape, length.out = length(y)) else NULL
eshape = .eshape
if ( .lshape == "elogit" && length(init.xi) &&
(any(init.xi <= eshape$min | init.xi >= eshape$max)))
@@ -553,17 +574,17 @@ dgammadx <- function(x, deriv.arg=1) {
est.sigma = !length(init.sig)
gshape = .gshape
temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], len = 12)
+ seq(gshape[1], gshape[2], length.out = 12)
for(xi.try in temp234) {
xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
(nvector^xi.try - 1) / xi.try
fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
if (est.sigma) {
- sigmaTry = rep(fit0$coef["X"], len = length(y))
+ sigmaTry = rep(fit0$coef["X"], length.out = length(y))
} else {
sigmaTry = init.sig
}
- muTry = rep(fit0$coef["Intercept"], len = length(y))
+ muTry = rep(fit0$coef["Intercept"], length.out = length(y))
llTry = egev(giveWarning=
FALSE)@loglikelihood(mu = NULL, y=y, w=w,
residuals = FALSE,
@@ -573,21 +594,21 @@ dgammadx <- function(x, deriv.arg=1) {
if (llTry >= objecFunction) {
if (est.sigma)
init.sig = sigmaTry
- init.mu = rep(muTry, len = length(y))
+ init.mu = rep(muTry, length.out = length(y))
objecFunction = llTry
bestxi = xi.try
}
}
if (!length(init.xi))
- init.xi = rep(bestxi, len = length(y))
+ init.xi = rep(bestxi, length.out = length(y))
} else {
init.xi = rep(if (length(init.xi)) init.xi else 0.05,
- len = length(y))
+ length.out = length(y))
if (!length(init.sig))
- init.sig = rep(sqrt(6*var(y))/pi, len = length(y))
+ init.sig = rep(sqrt(6*var(y))/pi, length.out = length(y))
EulerM = -digamma(1)
- init.mu = rep(median(y) - EulerM * init.sig, len = length(y))
+ init.mu = rep(median(y) - EulerM * init.sig, length.out = length(y))
}
bad <- (1 + init.xi*(y-init.mu)/init.sig <= 0)
if (fred <- sum(bad, na.rm = TRUE)) {
@@ -727,7 +748,8 @@ dgammadx <- function(x, deriv.arg=1) {
wz[is.zero, iam(1,3, M)] <- -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
wz[is.zero, iam(2,3, M)] <- (-dgammadx(2,3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) + 2*dgammadx(1,3)/3)/sigma
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1,3)/3)/sigma
}
wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
@@ -745,7 +767,7 @@ dgammadx <- function(x, deriv.arg=1) {
rgumbel <- function(n, location = 0, scale = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
answer = location - scale * log(-log(runif(use.n)))
@@ -795,11 +817,11 @@ pgumbel <- function(q, location = 0, scale = 1) {
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, positive = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -824,15 +846,15 @@ pgumbel <- function(q, location = 0, scale = 1) {
stop("There is at least one row of the response containing all NAs")
if (ncol(y) > 1) {
yiri = y[cbind(1:nrow(y), r.vec)]
- sc.init = if (is.Numeric( .iscale, posit = TRUE))
+ sc.init = if (is.Numeric( .iscale, positive = TRUE))
.iscale else {3 * (rowMeans(y, na.rm = TRUE) - yiri)}
sc.init = rep(sc.init, length=nrow(y))
sc.init[sc.init <= 0.0001] = 1 # Used to be .iscale
loc.init = yiri + sc.init * log(r.vec)
} else {
- sc.init = if (is.Numeric( .iscale, posit = TRUE))
+ sc.init = if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, len = n)
+ sc.init = rep(sc.init, length.out = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
@@ -946,7 +968,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
rgpd <- function(n, location = 0, scale = 1, shape = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
if (!is.Numeric(location))
@@ -954,8 +976,8 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
if (!is.Numeric(shape))
stop("bad input for argument 'shape'")
ans = numeric(use.n)
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n)
+ shape = rep(shape, length.out = use.n); location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase)
@@ -978,14 +1000,14 @@ dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
if (oobounds.log > 0)
stop("bad input for argument 'oobounds.log'")
- if (!is.Numeric(tolshape0, allow = 1, posit = TRUE))
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'tolshape0'")
L = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, len = L); location = rep(location, len = L);
- scale = rep(scale, len = L);
- x = rep(x, len = L)
+ shape = rep(shape, length.out = L); location = rep(location, length.out = L);
+ scale = rep(scale, length.out = L);
+ x = rep(x, length.out = L)
- logdensity = rep(log(0), len = L)
+ logdensity = rep(log(0), length.out = L)
scase = abs(shape) < tolshape0
nscase = sum(scase)
if (L - nscase) {
@@ -1032,10 +1054,10 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
use.n = max(length(q), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len = use.n);
- location = rep(location, len = use.n);
- scale = rep(scale, len = use.n);
- q = rep(q-location, len = use.n)
+ shape = rep(shape, length.out = use.n);
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n);
+ q = rep(q-location, length.out = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
@@ -1058,8 +1080,8 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
use.n = max(length(p), length(location), length(scale), length(shape))
ans = numeric(use.n)
- shape = rep(shape, len = use.n); location = rep(location, len = use.n);
- scale = rep(scale, len = use.n); p = rep(p, len = use.n)
+ shape = rep(shape, length.out = use.n); location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n); p = rep(p, length.out = use.n)
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
@@ -1101,7 +1123,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
stop("bad input for argument 'giveWarning'")
if (!is.Numeric(threshold))
stop("bad input for argument 'threshold'")
- if (!is.Numeric(imethod, allow = 1, posit = TRUE, integer = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
@@ -1111,12 +1133,12 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
lshape = as.character(substitute(lshape))
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit = TRUE) ||
+ (!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (!is.Numeric(tolshape0, allow = 1, posit = TRUE) || tolshape0 > 0.1)
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) || tolshape0 > 0.1)
stop("bad input for argument 'tolshape0'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(escale)) escale = list()
@@ -1239,7 +1261,7 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
}
igpd = !is.zero & !bad
iexp = is.zero & !bad
- dl.dShape = dl.dsigma = rep(0, len = length(y))
+ dl.dShape = dl.dsigma = rep(0, length.out = length(y))
dl.dsigma[igpd] = ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
Shape[igpd]*ystar[igpd]) - 1) / sigma[igpd]
dl.dShape[igpd] = log(A[igpd])/Shape[igpd]^2 - (1 + 1/Shape[igpd]) *
@@ -1390,11 +1412,11 @@ setMethod("guplot", "vlm",
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
if (length(percentiles) &&
- (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
+ (!is.Numeric(percentiles, positive = TRUE) || max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -1424,9 +1446,9 @@ setMethod("guplot", "vlm",
extra$percentiles = .percentiles
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, posit = TRUE))
+ sc.init = if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, len = n)
+ sc.init = rep(sc.init, length.out = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
@@ -1523,10 +1545,10 @@ setMethod("guplot", "vlm",
lscale = as.character(substitute(lscale))
if (!is.logical(mean) || length(mean) != 1)
stop("mean must be a single logical value")
- if (!mean && (!is.Numeric(percentiles, posit = TRUE) ||
+ if (!mean && (!is.Numeric(percentiles, positive = TRUE) ||
any(percentiles>=100)))
stop("valid percentiles values must be given when mean = FALSE")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -1549,18 +1571,21 @@ setMethod("guplot", "vlm",
if (any(y) <= 0)
stop("all response values must be positive")
- if (!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len = n)
- if (!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len = n)
+ if (!length(extra$leftcensored))
+ extra$leftcensored = rep(FALSE, length.out = n)
+ if (!length(extra$rightcensored))
+ extra$rightcensored = rep(FALSE, length.out = n)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
predictors.names =
c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale , tag = FALSE))
+ namesof("scale", .lscale, earg = .escale , tag = FALSE))
+
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, posit = TRUE))
+ sc.init = if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * sqrt(var(y) * 6 ) / pi
- sc.init = rep(sc.init, len = n)
+ sc.init = rep(sc.init, length.out = n)
EulerM = -digamma(1)
loc.init = (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
@@ -1690,10 +1715,10 @@ dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
rm(log)
L = max(length(x), length(scale), length(shape), length(location))
- x = rep(x, len = L); scale = rep(scale, len = L);
- shape = rep(shape, len = L); location = rep(location, len = L);
+ x = rep(x, length.out = L); scale = rep(scale, length.out = L);
+ shape = rep(shape, length.out = L); location = rep(location, length.out = L);
- logdensity = rep(log(0), len = L)
+ logdensity = rep(log(0), length.out = L)
xok = (x > location)
rzedd = scale / (x - location)
logdensity[xok] = log(shape[xok]) - (rzedd[xok]^shape[xok]) +
@@ -1706,9 +1731,9 @@ dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
pfrechet <- function(q, location = 0, scale = 1, shape) {
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("scale must be positive")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("shape must be positive")
rzedd = scale / (q - location)
ans = exp(-(rzedd^shape))
@@ -1718,20 +1743,20 @@ pfrechet <- function(q, location = 0, scale = 1, shape) {
qfrechet <- function(p, location = 0, scale = 1, shape) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("0 < p < 1 is required")
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("scale must be positive")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("shape must be positive")
location + scale * (-log(p))^(-1/shape)
}
rfrechet <- function(n, location = 0, scale = 1, shape) {
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("scale must be positive")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("shape must be positive")
location + scale * (-log(runif(n)))^(-1/shape)
@@ -1791,7 +1816,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
c(namesof("scale", .lscale, earg = .escale, short = TRUE),
namesof("shape", .lshape, earg = .eshape, short = TRUE))
- extra$location = rep( .location, len = n) # stored here
+ extra$location = rep( .location, length.out = n) # stored here
if (!length(etastart)) {
locinit = extra$location
@@ -1813,8 +1838,8 @@ frechet2.control <- function(save.weight = TRUE, ...)
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- shape.init = if (length( .ishape )) rep( .ishape, len = n) else {
- rep(try.this, len = n) # variance exists if shape > 2
+ shape.init = if (length( .ishape )) rep( .ishape, length.out = n) else {
+ rep(try.this, length.out = n) # variance exists if shape > 2
}
@@ -1823,11 +1848,11 @@ frechet2.control <- function(save.weight = TRUE, ...)
myquant = (-log(myprobs))^(-1/shape.init[1])
myfit = lsfit(x = myquant, y = myobsns)
- Scale.init = if (length( .iscale)) rep( .iscale, len = n) else {
+ Scale.init = if (length( .iscale)) rep( .iscale, length.out = n) else {
if (all(shape.init > 1)) {
myfit$coef[2]
} else {
- rep( 1.0, len = n)
+ rep( 1.0, length.out = n)
}
}
@@ -1843,7 +1868,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale, earg = .escale )
shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
- ans = rep(as.numeric(NA), len = length(shape))
+ ans = rep(as.numeric(NA), length.out = length(shape))
ok = shape > 1
ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
@@ -1986,7 +2011,7 @@ if (FALSE)
namesof("scale", .lscale, earg = .escale, short = TRUE),
namesof("shape", .lshape, earg = .eshape, short = TRUE))
- anchorpt = if (is.Numeric( .anchor, allow = 1)) .anchor else min(y)
+ anchorpt = if (is.Numeric( .anchor, allowable.length = 1)) .anchor else min(y)
if (min(y) < anchorpt)
stop("anchor point is too large")
extra$LHSanchor = anchorpt
@@ -2010,8 +2035,8 @@ if (FALSE)
print("try.this")
print( try.this )
- shape.init = if (length( .ishape )) rep( .ishape, len = n) else {
- rep(try.this, len = n) # variance exists if shape > 2
+ shape.init = if (length( .ishape )) rep( .ishape, length.out = n) else {
+ rep(try.this, length.out = n) # variance exists if shape > 2
}
@@ -2026,25 +2051,27 @@ if (FALSE)
plot(myobsns ~ myquant)
- Scale.init = if (length( .iscale)) rep( .iscale, len = n) else {
+ Scale.init = if (length( .iscale )) {
+ rep( .iscale , length.out = n)
+ } else {
if (all(shape.init > 1)) {
myfit$coef[2]
} else {
- rep( 1.0, len = n)
+ rep( 1.0, length.out = n)
}
}
- locinit = if (length( .ilocation)) rep( .ilocation, len = n) else {
+ locinit = if (length( .ilocation)) rep( .ilocation, length.out = n) else {
if (myfit$coef[1] < min(y)) {
print("using myfit$coef[1] for initial location")
print( myfit$coef[1] )
print( min(y) )
print( anchorpt )
- rep(myfit$coef[1], len = n)
+ rep(myfit$coef[1], length.out = n)
} else {
print("using heuristic initial location")
- rep(anchorpt - 0.01 * diff(range(y)), len = n)
+ rep(anchorpt - 0.01 * diff(range(y)), length.out = n)
}
}
if (any(y <= locinit))
@@ -2072,7 +2099,7 @@ if (FALSE)
eta2theta(eta[, 1], .ldiffr, earg = .ediffr)
Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
- ans = rep(as.numeric(NA), len = length(shape))
+ ans = rep(as.numeric(NA), length.out = length(shape))
okay = shape > 1
ans[okay] = loctn[okay] + Scale[okay] * gamma(1 - 1/shape[okay])
ans
@@ -2201,7 +2228,7 @@ recnormal1.control <- function(save.weight = TRUE, ...)
lmean = as.character(substitute(lmean))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3.5)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2227,9 +2254,9 @@ recnormal1.control <- function(save.weight = TRUE, ...)
if (any(w != 1))
warning("weights should have unit values only")
if (!length(etastart)) {
- mean.init = if (length( .imean)) rep( .imean, len = n) else {
+ mean.init = if (length( .imean)) rep( .imean, length.out = n) else {
if (.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
- sd.init = if (length( .isd)) rep( .isd, len = n) else {
+ sd.init = if (length( .isd)) rep( .isd, length.out = n) else {
if (.imethod == 1) 1*(sd(c(y))) else
if (.imethod == 2) 5*(sd(c(y))) else
.5*(sd(c(y)))
@@ -2306,7 +2333,8 @@ recexp1.control <- function(save.weight = TRUE, ...)
if (mode(lrate) != "character" && mode(lrate) != "name")
lrate = as.character(substitute(lrate))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3.5)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2378,18 +2406,18 @@ recexp1.control <- function(save.weight = TRUE, ...)
poissonp <- function(ostatistic, dimension = 2,
link = "loge", earg = list(),
idensity = NULL, imethod = 1) {
- if (!is.Numeric(ostatistic, posit = TRUE, allow = 1, integ = TRUE))
+ if (!is.Numeric(ostatistic, positive = TRUE, allowable.length = 1, integer.valued = TRUE))
stop("argument 'ostatistic' must be a single positive integer")
- if (!is.Numeric(dimension, posit = TRUE, allow = 1, integ = TRUE) ||
+ if (!is.Numeric(dimension, positive = TRUE, allowable.length = 1, integer.valued = TRUE) ||
dimension > 3)
stop("argument 'dimension' must be 2 or 3")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, posit = TRUE, integer = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, positive = TRUE, integer.valued = TRUE) ||
imethod > 2.5)
stop("argument 'imethod' must be 1 or 2")
- if (length(idensity) && !is.Numeric(idensity, posit = TRUE))
+ if (length(idensity) && !is.Numeric(idensity, positive = TRUE))
stop("bad input for argument 'idensity'")
new("vglmff",
diff --git a/R/family.fishing.R b/R/family.fishing.R
index 6fd3209..73cea0f 100644
--- a/R/family.fishing.R
+++ b/R/family.fishing.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,12 +18,12 @@ DeLury = function(catch, effort,
if (type == "DeLury") {
Et = cumsum(effort) - ifelse(ricker, 0.5, 1) * effort
logCPUE = log(CPUE)
- lmfit = lm(logCPUE ~ Et, x=TRUE)
+ lmfit = lm(logCPUE ~ Et, x = TRUE)
myq = catchabilityCoefficient = -coef(lmfit)[2]
N0 = exp(coef(lmfit)["(Intercept)"]) / myq
} else {
Kt = cumsum(catch) - ifelse(ricker, 0.5, 1) * catch
- lmfit = lm(CPUE ~ Kt, x=TRUE)
+ lmfit = lm(CPUE ~ Kt, x = TRUE)
myq = catchabilityCoefficient = -coef(lmfit)[2]
N0 = coef(lmfit)["(Intercept)"] / myq
}
@@ -50,7 +50,9 @@ DeLury = function(catch, effort,
wffc.P1 = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
ifelse(length >= min.eligible, c1 + (ppm/100) *
- ceiling( signif(100*length, dig = 8) ), 0)
+ ceiling( signif(100*length, digits = 8) ), 0)
+
+
wffc.P1star = function(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
ifelse(length >= min.eligible, c1 + ppm * length, 0)
diff --git a/R/family.functions.R b/R/family.functions.R
index 919107b..21e8086 100644
--- a/R/family.functions.R
+++ b/R/family.functions.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.genetic.R b/R/family.genetic.R
index c590f8f..72fa69d 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,10 +28,14 @@
namesof("f", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
- delete.zero.colns = FALSE
+ mustart.orig = mustart
+ delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
@@ -42,55 +46,68 @@
}
predictors.names =
- c(namesof("p1", .link, earg = .earg, tag = FALSE),
- namesof("p2", .link, earg = .earg, tag = FALSE),
- namesof("f", .link, earg = .earg, tag = FALSE))
+ c(namesof("p1", .link , earg = .earg , tag = FALSE),
+ namesof("p2", .link , earg = .earg , tag = FALSE),
+ namesof("f", .link , earg = .earg , tag = FALSE))
+
if (is.null(etastart)) {
+
+
+
+ mydeterminant = mustart[, 2] * mustart[, 3] +
+ mustart[, 2] * mustart[, 5] +
+ mustart[, 3] * mustart[, 5]
p1 = if (is.numeric( .ip1 )) rep( .ip1 , len = n) else
- sqrt(mustart[,1])
- f = if (is.numeric( .iF )) rep( .iF , len = n) else
- rep(0.01, len = n) # close to zero
+ mustart[, 2] * mustart[, 3] / mydeterminant
p2 = if (is.numeric( .ip2 )) rep( .ip2 , len = n) else
- mustart[,2] / (sqrt(mustart[,1]) * 2)
+ mustart[, 2] * mustart[, 5] / mydeterminant
+ ff = if (is.numeric( .iF )) rep( .iF , len = n) else
+ abs(1 - mustart[, 2] / (2 * p1 * p2))
+
if (any(p1 <= 0) || any(p1 >= 1))
- stop("bad initial value for 'p1'")
+ stop("bad initial value for 'p1'")
if (any(p2 <= 0) || any(p2 >= 1))
- stop("bad initial value for 'p2'")
- etastart = cbind(theta2eta(p1, .link, earg = .earg),
- theta2eta(p2, .link, earg = .earg),
- theta2eta(f, .link, earg = .earg))
+ stop("bad initial value for 'p2'")
+
+ etastart = cbind(theta2eta(p1, .link , earg = .earg ),
+ theta2eta(p2, .link , earg = .earg ),
+ theta2eta(ff, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
}
}), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
- .earg = earg ))),
+ .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[,1], link = .link, earg = .earg)
- p2 = eta2theta(eta[,2], link = .link, earg = .earg)
- f = eta2theta(eta[,3], link = .link, earg = .earg)
- p3 = 1 - p1 - p2
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ f = eta2theta(eta[, 3], link = .link , earg = .earg )
+ p3 = abs(1 - p1 - p2)
cbind("G1G1" = f*p1+(1-f)*p1^2,
"G1G2" = 2*p1*p2*(1-f),
"G1G3" = 2*p1*p3*(1-f),
"G2G2" = f*p2+(1-f)*p2^2,
"G2G3" = 2*p2*p3*(1-f),
"G3G3" = f*p3+(1-f)*p3^2)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(p1 = .link, p2 = .link, f = .link)
- misc$earg = list(p1 = .earg, p2 = .earg, f = .earg )
- }), list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(p1 = .link , p2 = .link , f = .link )
+ misc$earg = list(p1 = .earg , p2 = .earg , f = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("G1G2G3", "vgenetic"),
deriv = eval(substitute(expression({
- p1 = eta2theta(eta[,1], link = .link, earg = .earg)
- p2 = eta2theta(eta[,2], link = .link, earg = .earg)
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
p3 = 1-p1-p2
- f = eta2theta(eta[,3], link = .link, earg = .earg)
+ f = eta2theta(eta[, 3], link = .link , earg = .earg )
dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
0, -2*(1-f)*p2, -f - 2*p3*(1-f))
dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
@@ -100,24 +117,24 @@
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link, earg = .earg)
- c(w) * cbind(dPP.deta[,1] * dl1,
- dPP.deta[,2] * dl2,
- dPP.deta[,3] * dl3)
- }), list( .link = link, .earg = earg ))),
+ dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link , earg = .earg )
+ c(w) * cbind(dPP.deta[, 1] * dl1,
+ dPP.deta[, 2] * dl2,
+ dPP.deta[, 3] * dl3)
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6,3))
+ dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index = iam(i1,i2,M)
+ index = iam(i1,i2, M)
wz[,index] = rowSums(dPP[, , i1, drop = TRUE] *
dPP[, , i2, drop = TRUE] / mu) *
dPP.deta[, i1] * dPP.deta[, i2]
}
c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
@@ -136,9 +153,14 @@
namesof("f", "identity", tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("AA","Aa","aa")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
@@ -148,46 +170,47 @@
}
predictors.names =
- c(namesof("pA", .link, earg = .earg, tag = FALSE),
+ c(namesof("pA", .link , earg = .earg , tag = FALSE),
namesof("f", "identity", earg = list(), tag = FALSE))
+
if (is.null(etastart)) {
pA = if (is.numeric( .ipA )) rep( .ipA , len = n) else
c(sqrt(mustart[, 1] - mustart[, 2] / 2))
f = if (is.numeric( .iF )) rep( .iF , len = n) else
- rep(0.01, len = n) # 1- mustart[,2]/(2*pA*(1-pA))
+ rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
if (any(pA <= 0) || any(pA >= 1))
stop("bad initial value for 'pA'")
- etastart = cbind(theta2eta(pA, .link, earg = .earg),
+ etastart = cbind(theta2eta(pA, .link , earg = .earg ),
theta2eta(f, "identity"))
mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .ipA=ipA, .iF = iF, .earg = earg ))),
+ }), list( .link = link, .ipA = ipA, .iF = iF, .earg = earg))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- pA = eta2theta(eta[,1], link = .link, earg = .earg)
- f = eta2theta(eta[,2], link = "identity", earg = list())
+ pA = eta2theta(eta[, 1], link = .link , earg = .earg )
+ f = eta2theta(eta[, 2], link = "identity", earg = list())
cbind(AA = pA^2+pA*(1-pA)*f,
Aa = 2*pA*(1-pA)*(1-f),
aa = (1-pA)^2 + pA*(1-pA)*f)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(pA = .link, f = "identity")
- misc$earg = list(pA = .earg, f = list() )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- pA = sqrt(mu[,1] - mu[,2]/2)
- f = 1 - mu[,2] / (2*pA*(1-pA))
- cbind(theta2eta(pA, .link, earg = .earg),
- theta2eta(f, "identity"))
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(pA = .link , f = "identity")
+ misc$earg = list(pA = .earg , f = list() )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("AAaa.nohw", "vgenetic"),
deriv = eval(substitute(expression({
- pA = eta2theta(eta[,1], link = .link, earg = .earg)
- f = eta2theta(eta[,2], link = "identity")
+ pA = eta2theta(eta[, 1], link = .link , earg = .earg )
+ f = eta2theta(eta[, 2], link = "identity")
dP1 = cbind(f + 2*pA*(1-f),
2*(1-f)*(1-2*pA),
-2*(1-pA) +f*(1-2*pA))
@@ -196,24 +219,24 @@
pA*(1-pA))
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
- dPP.deta = dtheta.deta(pA, link = .link, earg = .earg)
+ dPP.deta = dtheta.deta(pA, link = .link , earg = .earg )
c(w) * cbind(dPP.deta * dl1,
dl2)
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
dPP = array(c(dP1, dP2), c(n, 3, 2))
- dPP.deta = cbind(dtheta.deta(pA, link = .link, earg = .earg),
+ dPP.deta = cbind(dtheta.deta(pA, link = .link , earg = .earg ),
dtheta.deta(f, link = "identity"))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
for(i1 in 1:M)
for(i2 in i1:M) {
- index = iam(i1,i2,M)
+ index = iam(i1,i2, M)
wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
dPP[,,i2,drop = TRUE] / mu) *
dPP.deta[,i1] * dPP.deta[,i2]
}
c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
@@ -231,9 +254,14 @@
namesof("p", link, earg = earg)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
- predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
+ predictors.names = namesof("p", .link , earg = .earg , tag = FALSE)
+
+ if (length(mustart.orig))
+ mustart = mustart.orig
ok.col.ny = c("AB","Ab","aB","ab")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
@@ -245,46 +273,47 @@
if (is.null(etastart)) {
p.init = if (is.numeric(.init.p)) rep(.init.p, n) else
- c(1 - 2 * sqrt(mustart[,4]))
- etastart = theta2eta(p.init, .link, earg = .earg)
+ c(1 - 2 * sqrt(mustart[, 4]))
+ etastart = theta2eta(p.init, .link , earg = .earg )
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .init.p=init.p, .earg = earg ))),
+ }), list( .link = link, .init.p=init.p, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- p = eta2theta(eta, link = .link, earg = .earg)
+ p = eta2theta(eta, link = .link , earg = .earg )
cbind("AB" = (2+(1-p)^2),
"Ab" = (1-(1-p)^2),
"aB" = (1-(1-p)^2),
"ab" = (1-p)^2) / 4
- }, list( .link = link, .earg = earg ) )),
- last = eval(substitute(expression({
- misc$link = c(p = .link)
- misc$earg = list(p= .earg )
- }), list( .link = link, .earg = earg ) )),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- p = 1 - 2 * sqrt(mu[,4])
- theta2eta(p, .link, earg = .earg)
- }, list( .link = link, .earg = earg ) )),
+ }, list( .link = link, .earg = earg) )),
+
+ last = eval(substitute(expression({
+ misc$link = c(p = .link )
+ misc$earg = list(p = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg) )),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu,
- log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("AB.Ab.aB.ab2", "vgenetic"),
deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link, earg = .earg)
+ pp = eta2theta(eta, link = .link , earg = .earg )
dP1 = cbind(-0.5*(1-pp),
0.5*(1-pp),
0.5*(1-pp),
-0.5*(1-pp))
dl1 = rowSums(y * dP1 / mu)
- dPP.deta = dtheta.deta(pp, link = .link, earg = .earg)
+ dPP.deta = dtheta.deta(pp, link = .link , earg = .earg )
c(w) * dPP.deta * dl1
- }), list( .link = link, .earg = earg ) )),
+ }), list( .link = link, .earg = earg) )),
weight = eval(substitute(expression({
wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
c(w) * wz
- }), list( .link = link, .earg = earg ) )))
+ }), list( .link = link, .earg = earg) )))
}
@@ -303,9 +332,14 @@
namesof("p2", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("A1A1","A1A2","A2A2","A1A3","A2A3","A3A3")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
@@ -316,67 +350,66 @@
}
predictors.names =
- c(namesof("pA", .link, earg = .earg, tag = FALSE),
- namesof("pB", .link, earg = .earg, tag = FALSE))
+ c(namesof("pA", .link , earg = .earg , tag = FALSE),
+ namesof("pB", .link , earg = .earg , tag = FALSE))
+
if (is.null(etastart)) {
p1 = if (is.numeric(.ip1)) rep(.ip1, n) else
- c(sqrt(mustart[,1]))
+ c(sqrt(mustart[, 1]))
p2 = if (is.numeric(.ip2)) rep(.ip2, n) else
- c(sqrt(mustart[,3]))
- etastart = cbind(theta2eta(p1, .link, earg = .earg),
- theta2eta(p2, .link, earg = .earg))
+ c(sqrt(mustart[, 3]))
+ etastart = cbind(theta2eta(p1, .link , earg = .earg ),
+ theta2eta(p2, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg ))),
+ }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[,1], link = .link, earg = .earg)
- p2 = eta2theta(eta[,2], link = .link, earg = .earg)
- qq = 1-p1-p2
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ qq = abs(1 - p1 - p2)
cbind(A1A1 = p1*p1,
A1A2 = 2*p1*p2,
A2A2 = p2*p2,
A1A3 = 2*p1*qq,
A2A3 = 2*p2*qq,
A3A3 = qq*qq)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(p1= .link, p2= .link)
- misc$earg = list(p1= .earg, p2= .earg )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- p1 = sqrt(mu[,1])
- p2 = sqrt(mu[,3])
- qq = 1 - p1 - p2
- cbind(theta2eta(p1, .link, earg = .earg),
- theta2eta(p2, .link, earg = .earg))
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(p1 = .link , p2 = .link )
+ misc$earg = list(p1 = .earg , p2 = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu,
- log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("A1A2A3", "vgenetic"),
deriv = eval(substitute(expression({
- p1 = eta2theta(eta[,1], link = .link, earg = .earg)
- p2 = eta2theta(eta[,2], link = .link, earg = .earg)
- dl.dp1 = (2*y[,1]+y[,2]+y[,4])/p1 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
- dl.dp2 = (2*y[,3]+y[,2]+y[,5])/p2 - (2*y[,6]+y[,4]+y[,5])/(1-p1-p2)
- dp1.deta = dtheta.deta(p1, link = .link, earg = .earg)
- dp2.deta = dtheta.deta(p2, link = .link, earg = .earg)
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ dl.dp1 = (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+ dl.dp2 = (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+ dp1.deta = dtheta.deta(p1, link = .link , earg = .earg )
+ dp2.deta = dtheta.deta(p2, link = .link , earg = .earg )
c(w) * cbind(dl.dp1 * dp1.deta,
dl.dp2 * dp2.deta)
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
qq = 1-p1-p2
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
ed2l.dp12 = 2 * (1/p1 + 1/qq)
ed2l.dp22 = 2 * (1/p2 + 1/qq)
ed2l.dp1dp2 = 2 / qq
- wz[,iam(1,1,M)] = dp1.deta^2 * ed2l.dp12
- wz[,iam(2,2,M)] = dp2.deta^2 * ed2l.dp22
- wz[,iam(1,2,M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
+ wz[, iam(1, 1, M)] = dp1.deta^2 * ed2l.dp12
+ wz[, iam(2, 2, M)] = dp2.deta^2 * ed2l.dp22
+ wz[, iam(1, 2, M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
@@ -397,9 +430,14 @@
namesof("nS", link, earg = earg, tag = FALSE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("MS","Ms","MNS","MNs","NS","Ns")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
@@ -410,57 +448,55 @@
}
predictors.names <-
- c(namesof("mS", .link, earg = .earg, tag = FALSE),
- namesof("ms", .link, earg = .earg, tag = FALSE),
- namesof("nS", .link, earg = .earg, tag = FALSE))
+ c(namesof("mS", .link , earg = .earg , tag = FALSE),
+ namesof("ms", .link , earg = .earg , tag = FALSE),
+ namesof("nS", .link , earg = .earg , tag = FALSE))
+
if (is.null(etastart)) {
ms = if (is.numeric(.ims)) rep(.ims, n) else
- c(sqrt(mustart[,2]))
+ c(sqrt(mustart[, 2]))
ns = c(sqrt(mustart[,6]))
nS = if (is.numeric(.inS)) rep(.inS, n) else
c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
mS = if (is.numeric(.imS)) rep(.imS, n) else
1-ns-ms-nS
- etastart = cbind(theta2eta(mS, .link, earg = .earg),
- theta2eta(ms, .link, earg = .earg),
- theta2eta(nS, .link, earg = .earg))
+ etastart = cbind(theta2eta(mS, .link , earg = .earg ),
+ theta2eta(ms, .link , earg = .earg ),
+ theta2eta(nS, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg ))),
+ }), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mS = eta2theta(eta[,1], link = .link, earg = .earg)
- ms = eta2theta(eta[,2], link = .link, earg = .earg)
- nS = eta2theta(eta[,3], link = .link, earg = .earg)
- ns = 1 - mS - ms - nS
+ mS = eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms = eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS = eta2theta(eta[, 3], link = .link , earg = .earg )
+ ns = abs(1 - mS - ms - nS)
cbind(MS = mS^2 + 2*mS*ms,
Ms = ms^2,
MNS = 2*(mS*nS + ms*nS + mS*ns),
MNs = 2*ms*ns,
NS = nS^2 + 2*nS*ns,
Ns = ns^2)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(mS = .link, ms = .link, nS = .link)
- misc$earg = list(mS = .earg, ms = .earg, nS = .earg )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- ms = sqrt(mu[,2])
- ns = sqrt(mu[,6])
- nS = c(-ns + sqrt(ns^2 + mu[,5]))
- mS = 1 - ns - ms - nS
- cbind(theta2eta(mS, .link, earg = .earg),
- theta2eta(ms, .link, earg = .earg),
- theta2eta(nS, .link, earg = .earg))
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(mS = .link , ms = .link , nS = .link )
+ misc$earg = list(mS = .earg , ms = .earg , nS = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("MNSs", "vgenetic"),
deriv = eval(substitute(expression({
- mS = eta2theta(eta[,1], link = .link, earg = .earg)
- ms = eta2theta(eta[,2], link = .link, earg = .earg)
- nS = eta2theta(eta[,3], link = .link, earg = .earg)
+ mS = eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms = eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS = eta2theta(eta[, 3], link = .link , earg = .earg )
ns = 1-mS-ms-nS
dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
@@ -468,21 +504,21 @@
dl1 = rowSums(y * dP1 / mu)
dl2 = rowSums(y * dP2 / mu)
dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link, earg = .earg)
+ dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link , earg = .earg )
c(w) * dPP.deta * cbind(dl1, dl2, dl3)
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6,3))
+ dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
for(i1 in 1:M)
for(i2 in i1:M) {
- index = iam(i1,i2,M)
+ index = iam(i1,i2, M)
wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
dPP[,,i2,drop = TRUE] / mu) *
dPP.deta[,i1] * dPP.deta[,i2]
}
c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
@@ -492,104 +528,112 @@
ABO = function(link = "logit", earg = list(), ipA = NULL, ipO = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ if (mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+ if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
+ new("vglmff",
+ blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
"Links: ",
namesof("pA", link, earg = earg), ", ",
namesof("pB", link, earg = earg, tag = FALSE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
-
- ok.col.ny = c("A","B","AB","O")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as c('A','B','AB','O')")
- }
-
- predictors.names <-
- c(namesof("pA", .link, earg = .earg, tag = FALSE),
- namesof("pB", .link, earg = .earg, tag = FALSE))
-
- if (is.null(etastart)) {
- pO = if (is.Numeric( .ipO )) rep( .ipO , len = n) else
- c(sqrt(mustart[,4]))
- pA = if (is.Numeric( .ipA )) rep( .ipA , len = n) else
- c(1 - sqrt(mustart[,2] + mustart[,4]))
- pB = abs(1 - pA - pO)
- etastart = cbind(theta2eta(pA, .link, earg = .earg),
- theta2eta(pB, .link, earg = .earg))
- }
- }), list( .link = link, .ipO = ipO, .ipA = ipA, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- pA = eta2theta(eta[,1], link = .link, earg = .earg)
- pB = eta2theta(eta[,2], link = .link, earg = .earg)
- pO = abs(1 - pA - pB)
- cbind(A = pA*(pA+2*pO),
- B = pB*(pB+2*pO),
- AB = 2*pA*pB,
- O = pO*pO)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(pA = .link, pB = .link)
- misc$earg = list(pA = .earg, pB = .earg )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- pO = sqrt(mu[,4])
- p1 = ( (1-pO)+sqrt((1-pO)^2 + 2*mu[,3]) )/2
- p2 = ( (1-pO)-sqrt((1-pO)^2 + 2*mu[,3]) )/2
- index = (p2 >= 0) & (p2 <= 1)
- pA = p1
- pA[index] = p2[index]
- pB = abs(1 - pA - pO)
- cbind(theta2eta(pA, .link, earg = .earg),
- theta2eta(pB, .link, earg = .earg))
- }, list( .link = link, .earg = earg ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE,
- docheck = FALSE))
- },
- vfamily = c("ABO", "vgenetic"),
- deriv = eval(substitute(expression({
- ppp = eta2theta(eta[,1], link = .link, earg = .earg)
- qqq = eta2theta(eta[,2], link = .link, earg = .earg)
- rrr = abs(1 - ppp - qqq)
-
-
- pbar = 2*rrr+ppp
- qbar = 2*rrr+qqq
- na = y[,1]
- nb = y[,2]
- nab = y[,3]
- no = y[,4]
-
- dl.dp = (na+nab)/ppp - na/pbar - 2*nb/qbar - 2*no/rrr
- dl.dq = (nb+nab)/qqq - 2*na/pbar - nb/qbar - 2*no/rrr
- dp.deta = dtheta.deta(ppp, link = .link, earg = .earg)
- dq.deta = dtheta.deta(qqq, link = .link, earg = .earg)
-
- c(w) * cbind(dl.dp * dp.deta,
- dl.dq * dq.deta)
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp2 = (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
- ed2l.dq2 = (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
- ed2l.dpdq = 2 * (1 + qqq/qbar + ppp/pbar)
- wz[,iam(1,1,M)] = dp.deta^2 * ed2l.dp2
- wz[,iam(2,2,M)] = dq.deta^2 * ed2l.dq2
- wz[,iam(1,2,M)] = ed2l.dpdq * dp.deta * dq.deta
- c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ deviance = Deviance.categorical.data.vgam,
+
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
+
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
+ ok.col.ny = c("A","B","AB","O")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('A','B','AB','O')")
+ }
+
+
+ predictors.names <-
+ c(namesof("pA", .link , earg = .earg , tag = FALSE),
+ namesof("pB", .link , earg = .earg , tag = FALSE))
+
+ if (!length(etastart)) {
+ pO = if (is.Numeric( .ipO )) rep( .ipO , len = n) else
+ c(sqrt(mustart[, 4]))
+ pA = if (is.Numeric( .ipA )) rep( .ipA , len = n) else
+ c(1 - sqrt(mustart[, 2] + mustart[, 4]))
+ pB = abs(1 - pA - pO)
+ etastart = cbind(theta2eta(pA, .link , earg = .earg ),
+ theta2eta(pB, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .link = link, .ipO = ipO, .ipA = ipA, .earg = earg))),
+
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ pA = eta2theta(eta[, 1], link = .link , earg = .earg )
+ pB = eta2theta(eta[, 2], link = .link , earg = .earg )
+ pO = abs(1 - pA - pB)
+ cbind(A = pA*(pA+2*pO),
+ B = pB*(pB+2*pO),
+ AB = 2*pA*pB,
+ O = pO*pO)
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(pA = .link , pB = .link )
+ misc$earg = list(pA = .earg , pB = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu, log = TRUE,
+ dochecking = FALSE))
+ },
+
+ vfamily = c("ABO", "vgenetic"),
+
+ deriv = eval(substitute(expression({
+ ppp = eta2theta(eta[, 1], link = .link , earg = .earg )
+ qqq = eta2theta(eta[, 2], link = .link , earg = .earg )
+ rrr = abs(1 - ppp - qqq)
+
+
+ pbar = 2*rrr + ppp
+ qbar = 2*rrr + qqq
+ naa = y[, 1]
+ nbb = y[, 2]
+ nab = y[, 3]
+ noo = y[, 4]
+
+ dl.dp = (naa+nab)/ppp - naa/pbar - 2*nbb/qbar - 2*noo/rrr
+ dl.dq = (nbb+nab)/qqq - 2*naa/pbar - nbb/qbar - 2*noo/rrr
+ dp.deta = dtheta.deta(ppp, link = .link , earg = .earg )
+ dq.deta = dtheta.deta(qqq, link = .link , earg = .earg )
+
+ c(w) * cbind(dl.dp * dp.deta,
+ dl.dq * dq.deta)
+ }), list( .link = link, .earg = earg))),
+
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+
+ ed2l.dp2 = (1 + 2/ppp + 4*qqq/qbar + ppp/pbar)
+ ed2l.dq2 = (1 + 2/qqq + 4*ppp/pbar + qqq/qbar)
+ ed2l.dpdq = 2 * (1 + qqq/qbar + ppp/pbar)
+
+ wz[, iam(1, 1, M)] = ed2l.dp2 * dp.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dq2 * dq.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dpdq * dp.deta * dq.deta
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
@@ -606,62 +650,71 @@
"Links: ", namesof("p", link, earg = earg, tag = TRUE)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("AB","Ab","aB","ab")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as c('AB','Ab','aB','ab')")
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
+ "c('AB','Ab','aB','ab')")
}
- predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
+ predictors.names = namesof("p", .link , earg = .earg , tag = FALSE)
+
if (is.null(etastart)) {
p = if (is.numeric( .init.p )) rep(.init.p, len = n) else
c(sqrt(4 * mustart[, 4]))
- etastart = cbind(theta2eta(p, .link, earg = .earg))
+ etastart = cbind(theta2eta(p, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .init.p=init.p, .earg = earg ))),
+ }), list( .link = link, .init.p=init.p, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- p = eta2theta(eta, link = .link, earg = .earg)
+ p = eta2theta(eta, link = .link , earg = .earg )
pp4 = p * p / 4
cbind(AB = 0.5 + pp4,
Ab = 0.25 - pp4,
aB = 0.25 - pp4,
ab = pp4)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(p = .link)
- misc$earg = list(p= .earg )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- p = sqrt(4* mu[,4])
- theta2eta(p, .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c(p = .link )
+ misc$earg = list(p = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu, log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("AB.Ab.aB.ab", "vgenetic"),
deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link, earg = .earg)
+ pp = eta2theta(eta, link = .link , earg = .earg )
p2 = pp*pp
- nAB = w*y[,1]
- nAb = w*y[,2]
- naB = w*y[,3]
- nab = w*y[,4]
+ nAB = w * y[, 1]
+ nAb = w * y[, 2]
+ naB = w * y[, 3]
+ nab = w * y[, 4]
dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
- dp.deta = dtheta.deta(pp, link = .link, earg = .earg)
+ dp.deta = dtheta.deta(pp, link = .link , earg = .earg )
dl.dp * dp.deta
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
ed2l.dp2 = 4 * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
wz = cbind((dp.deta^2) * ed2l.dp2)
c(w) * wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
@@ -677,60 +730,68 @@
"Links: ", namesof("pA", link, earg = earg)),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns = FALSE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
ok.col.ny = c("AA","Aa","aa")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as c('AA','Aa','aa')")
+ stop("the columns of the response matrix ",
+ "should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
}
- predictors.names = namesof("pA", .link, earg = .earg, tag = FALSE)
+ predictors.names = namesof("pA", .link , earg = .earg , tag = FALSE)
+
if (is.null(etastart)) {
pA = if (is.numeric(.init.pA)) rep(.init.pA, n) else
- c(sqrt(mustart[,1]))
- etastart = cbind(theta2eta(pA, .link, earg = .earg))
+ c(sqrt(mustart[, 1]))
+ etastart = cbind(theta2eta(pA, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
}
- }), list( .link = link, .init.pA=init.pA, .earg = earg ))),
+ }), list( .link = link, .init.pA=init.pA, .earg = earg))),
linkinv = eval(substitute(function(eta,extra = NULL) {
- pA = eta2theta(eta, link = .link, earg = .earg)
+ pA = eta2theta(eta, link = .link , earg = .earg )
pp = pA*pA
cbind(AA = pp,
Aa = 2*pA*(1-pA),
aa = (1-pA)^2)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c("pA" = .link)
- misc$earg = list("pA" = .earg )
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- pA = sqrt(mu[,1])
- theta2eta(pA, .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
+
+ last = eval(substitute(expression({
+ misc$link = c("pA" = .link )
+ misc$earg = list("pA" = .earg )
+ misc$expected = TRUE
+ }), list( .link = link, .earg = earg))),
+
+
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x=w*y, size=w, prob=mu,
- log = TRUE, docheck = FALSE))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
},
vfamily = c("AA.Aa.aa", "vgenetic"),
deriv = eval(substitute(expression({
- pA = eta2theta(eta, link = .link, earg = .earg)
- nAA = w * y[,1]
- nAa = w * y[,2]
- naa = w * y[,3]
+ pA = eta2theta(eta, link = .link , earg = .earg )
+ nAA = w * y[, 1]
+ nAa = w * y[, 2]
+ naa = w * y[, 3]
dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
- dpA.deta = dtheta.deta(pA, link = .link, earg = .earg)
+ dpA.deta = dtheta.deta(pA, link = .link , earg = .earg )
dl.dpA * dpA.deta
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
wz = cbind((dpA.deta^2) * d2l.dp2)
wz
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 5de8a0a..f001b4f 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -42,6 +42,7 @@
list(Musual = 1,
zero = .zero)
}, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
assign("CQO.FastAlgorithm",
( .link == "logit" || .link == "cloglog"),
@@ -105,8 +106,8 @@
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
y = round(y)
- nvec = y[,1] + y[,2]
- y = ifelse(nvec > 0, y[,1] / nvec, 0)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
w = w * nvec
if (!length(mustart) && !length(etastart))
mustart = (0.5 + nvec * y) / (1 + nvec)
@@ -164,9 +165,9 @@
.onedpar = onedpar, .mv = mv,
.link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL)
- theta2eta(mu, .link, earg = .earg )
- , list( .link = link, .earg = earg ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, .link, earg = .earg )
+ }, list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -256,7 +257,7 @@
new("vglmff",
blurb = c("Gamma distribution\n\n",
- "Link: ", namesof("mu", link, earg=earg), "\n",
+ "Link: ", namesof("mu", link, earg =earg), "\n",
"Variance: mu^2 / k"),
deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
@@ -274,12 +275,12 @@
paste("mu", 1:M, sep = "")
}
predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
- earg=.earg, short = TRUE)
+ earg =.earg, short = TRUE)
if (!length(etastart))
- etastart <- theta2eta(mustart, link = .link, earg=.earg)
+ etastart <- theta2eta(mustart, link = .link, earg =.earg)
}), list( .link = link, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, link = .link, earg=.earg)
+ eta2theta(eta, link = .link, earg =.earg)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
dpar <- .dispersion
@@ -311,12 +312,12 @@
.estimated.dispersion = estimated.dispersion,
.link = link ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg=.earg)
+ theta2eta(mu, link = .link, earg =.earg)
}, list( .link = link, .earg = earg ))),
vfamily = "gammaff",
deriv = eval(substitute(expression({
dl.dmu = (y-mu) / mu^2
- dmu.deta = dtheta.deta(theta = mu, link = .link, earg=.earg)
+ dmu.deta = dtheta.deta(theta = mu, link = .link, earg =.earg)
w * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
@@ -431,7 +432,8 @@ pinv.gaussian = function(q, mu, lambda) {
rinv.gaussian = function(n, mu, lambda) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
mu = rep(mu, len = use.n); lambda = rep(lambda, len = use.n)
@@ -466,106 +468,121 @@ rinv.gaussian = function(n, mu, lambda) {
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu <- as.character(substitute(lmu))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda <- as.character(substitute(llambda))
- if (!is.list(emu)) emu = list()
- if (!is.list(elambda)) elambda = list()
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu <- as.character(substitute(lmu))
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda <- as.character(substitute(llambda))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (!is.list(emu)) emu = list()
+ if (!is.list(elambda)) elambda = list()
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
- new("vglmff",
- blurb = c("Inverse Gaussian distribution\n\n",
- "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
- "exp(-lambda*(y-mu)^2/(2*mu^2*y)); y, mu and lambda > 0",
- "Link: ", namesof("mu", lmu, earg= emu), ", ",
- namesof("lambda", llambda, earg= elambda), "\n",
- "Mean: ", "mu\n",
- "Variance: mu^3 / lambda"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0)) stop("Require the response to have positive values")
+ new("vglmff",
+ blurb = c("Inverse Gaussian distribution\n\n",
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
+ "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu and lambda > 0",
+ "Link: ", namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: ", "mu\n",
+ "Variance: mu^3 / lambda"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
- predictors.names =
- c(namesof("mu", .lmu, earg = .emu, short= TRUE),
- namesof("lambda", .llambda, earg = .elambda, short= TRUE))
-
- if (!length(etastart)) {
- init.mu =
- if ( .imethod == 3) {
- 0 * y + 1.1 * median(y) + 1/8
- } else if ( .imethod == 2) {
- use.this = weighted.mean(y, w)
- (1 - .sinit) * y + .sinit * use.this
- } else {
- 0 * y + weighted.mean(y, w) + 1/8
- }
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ if (any(y <= 0))
+ stop("Require the response to have positive values")
+
+ predictors.names =
+ c(namesof("mu", .lmu, earg = .emu, short = TRUE),
+ namesof("lambda", .llambda, earg = .elambda, short = TRUE))
+
+ if (!length(etastart)) {
+ init.mu =
+ if ( .imethod == 3) {
+ 0 * y + 1.1 * median(y) + 1/8
+ } else if ( .imethod == 2) {
+ use.this = weighted.mean(y, w)
+ (1 - .sinit) * y + .sinit * use.this
+ } else {
+ 0 * y + weighted.mean(y, w) + 1/8
+ }
- init.lambda = rep(if (length( .ilambda )) .ilambda else 1.0,
- len = n)
+ init.lambda = rep(if (length( .ilambda )) .ilambda else 1.0,
+ len = n)
- etastart = cbind(
- theta2eta(init.mu, link = .lmu, earg = .emu),
- theta2eta(init.lambda, link = .llambda, earg = .elambda))
- }
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda,
- .sinit = shrinkage.init,
- .imethod = imethod, .ilambda = ilambda ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], link = .lmu, earg = .emu)
- }, list( .lmu = lmu, .emu = emu, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$link = c(mu = .lmu, lambda = .llambda)
- misc$earg = list(mu = .emu, lambda = .elambda)
- misc$imethod = .imethod
- misc$shrinkage.init = .sinit
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda,
- .sinit = shrinkage.init,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[,2], link = .llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dinv.gaussian(x=y, mu = mu, lambda = lambda, log = TRUE))
- }
- }, list( .llambda = llambda, .emu = emu,
- .elambda = elambda ))),
- vfamily = "inv.gaussianff",
- deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[,1], link = .lmu, earg = .emu)
- lambda <- eta2theta(eta[,2], link = .llambda, earg = .elambda)
- dmu.deta <- dtheta.deta(theta = mymu, link = .lmu, earg = .emu)
- dlambda.deta <- dtheta.deta(theta = lambda, link = .llambda,
- earg = .elambda)
-
- dl.dmu = lambda * (y - mymu) / mymu^3
- dl.dlambda <- 0.5 / lambda - (y-mymu)^2 / (2 * mymu^2 * y)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dlambda * dlambda.deta)
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- weight = eval(substitute(expression({
- d2l.dmu2 = lambda / mymu^3
- d2l.dlambda2 = 0.5 / (lambda^2)
- wz <- cbind(dmu.deta^2 * d2l.dmu2,
- dlambda.deta^2 * d2l.dlambda2)
- c(w) * wz
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))))
+ etastart = cbind(
+ theta2eta(init.mu, link = .lmu, earg = .emu),
+ theta2eta(init.lambda, link = .llambda, earg = .elambda))
+ }
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .sinit = shrinkage.init,
+ .imethod = imethod, .ilambda = ilambda ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ }, list( .lmu = lmu, .emu = emu, .elambda = elambda ))),
+
+ last = eval(substitute(expression({
+ misc$link = c(mu = .lmu, lambda = .llambda)
+ misc$earg = list(mu = .emu, lambda = .elambda)
+ misc$imethod = .imethod
+ misc$shrinkage.init = .sinit
+ misc$expected = TRUE
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .sinit = shrinkage.init,
+ .imethod = imethod ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda <- eta2theta(eta[, 2], link = .llambda, earg = .elambda)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dinv.gaussian(x=y, mu = mu, lambda = lambda, log = TRUE))
+ }
+ }, list( .llambda = llambda, .emu = emu,
+ .elambda = elambda ))),
+
+ vfamily = "inv.gaussianff",
+
+ deriv = eval(substitute(expression({
+ mymu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ lambda <- eta2theta(eta[, 2], link = .llambda, earg = .elambda)
+
+ dmu.deta <- dtheta.deta(theta = mymu, link = .lmu, earg = .emu)
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .llambda,
+ earg = .elambda)
+
+ dl.dmu = lambda * (y - mymu) / mymu^3
+ dl.dlambda <- 0.5 / lambda - (y-mymu)^2 / (2 * mymu^2 * y)
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+
+ weight = eval(substitute(expression({
+
+ d2l.dmu2 = lambda / mymu^3
+
+ d2l.dlambda2 = 0.5 / (lambda^2)
+ wz <- cbind(dmu.deta^2 * d2l.dmu2,
+ dlambda.deta^2 * d2l.dlambda2)
+ c(w) * wz
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))))
}
@@ -577,133 +594,143 @@ rinv.gaussian = function(n, mu, lambda) {
parallel = FALSE, zero = NULL)
{
- estimated.dispersion <- dispersion==0
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ=TRUE, posit = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(imu) && !is.Numeric(imu, posit = TRUE))
- stop("bad input for argument 'imu'")
+ estimated.dispersion <- dispersion==0
+ if (mode(link )!= "character" && mode(link )!= "name")
+ link <- as.character(substitute(link))
+ if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Poisson distribution\n\n",
- "Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu"),
- constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- nz = y > 0
- devi = -(y - mu)
- devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(w * devi)
- },
- infos = eval(substitute(function(...) {
- list(Musual = 1,
- zero = .zero)
- }, list( .zero = zero ))),
- initialize = eval(substitute(expression({
- y = as.matrix(y)
- M = ncoly = ncol(y)
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(imu) && !is.Numeric(imu, positive = TRUE))
+ stop("bad input for argument 'imu'")
- assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- paste("mu", 1:M, sep = "")
- }
- predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
- earg = .earg, short = TRUE)
+ new("vglmff",
+ blurb = c("Poisson distribution\n\n",
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ nz = y > 0
+ devi = -(y - mu)
+ devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(w * devi)
+ },
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
- if (!length(etastart)) {
- mu.init = pmax(y, 1/8)
- for(iii in 1:ncol(y)) {
- if ( .imethod == 2) {
- mu.init[,iii] = weighted.mean(y[,iii], w) + 1/8
- } else if ( .imethod == 3) {
- mu.init[,iii] = median(y[,iii]) + 1/8
- }
+ initialize = eval(substitute(expression({
+ y = as.matrix(y)
+ M = ncoly = ncol(y)
+
+ assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ paste("mu", 1:M, sep = "")
+ }
+ predictors.names =
+ namesof(if (M > 1) dn2 else "mu", .link, earg = .earg, short = TRUE)
+
+ if (!length(etastart)) {
+ mu.init = pmax(y, 1/8)
+ for(iii in 1:ncol(y)) {
+ if ( .imethod == 2) {
+ mu.init[,iii] = weighted.mean(y[,iii], w) + 1/8
+ } else if ( .imethod == 3) {
+ mu.init[,iii] = median(y[,iii]) + 1/8
}
- if (length( .imu ))
- mu.init = matrix( .imu, n, ncoly, byrow=TRUE)
- etastart <- theta2eta(mu.init, link = .link, earg = .earg)
}
- }), list( .link = link, .estimated.dispersion = estimated.dispersion,
- .imethod = imethod, .imu = imu, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
+ if (length( .imu ))
+ mu.init = matrix( .imu, n, ncoly, byrow = TRUE)
+ etastart <- theta2eta(mu.init, link = .link, earg = .earg)
+ }
+ }), list( .link = link, .estimated.dispersion = estimated.dispersion,
+ .imethod = imethod, .imu = imu, .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
mu = eta2theta(eta, link = .link, earg = .earg)
- mu
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
- dpar <- .dispersion
- if (!dpar) {
- temp87 = (y-mu)^2 *
- wz / (dtheta.deta(mu, link = .link, earg = .earg)^2) # w cancel
- if (M > 1 && ! .onedpar) {
- dpar = rep(as.numeric(NA), len = M)
- temp87 = cbind(temp87)
- nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
- for(ii in 1:M)
- dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
- if (is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
- names(dpar) = dimnames(y)[[2]]
- } else
- dpar = sum(temp87) / (length(mu) - ncol(x))
- }
- misc$dispersion <- dpar
- misc$default.dispersion <- 1
- misc$estimated.dispersion <- .estimated.dispersion
- misc$expected = TRUE
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
- misc$imethod = .imethod
+ mu
+ }, list( .link = link, .earg = earg ))),
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
- }), list( .dispersion = dispersion, .imethod=imethod,
- .estimated.dispersion = estimated.dispersion,
- .onedpar = onedpar, .link = link, .earg = earg ))),
- linkfun = eval(substitute( function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w*(y/mu - 1) else {
- sum(w * dpois(x=y, lambda=mu, log = TRUE))
- }
- },
- vfamily = "poissonff",
- deriv = eval(substitute(expression({
- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- w * (y - mu)
- } else {
- lambda <- mu
- dl.dlambda <- (y-lambda) / lambda
- dlambda.deta <- dtheta.deta(theta = lambda, link = .link, earg = .earg)
- w * dl.dlambda * dlambda.deta
- }
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- tmp600 = mu
- tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
- w * tmp600
- } else {
- d2l.dlambda2 = 1 / lambda
- d2lambda.deta2=d2theta.deta2(theta = lambda,link= .link,earg = .earg)
- w * dlambda.deta^2 * d2l.dlambda2
- }
- }), list( .link = link, .earg = earg ))))
+ last = eval(substitute(expression({
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ dpar <- .dispersion
+ if (!dpar) {
+ temp87 = (y-mu)^2 *
+ wz / (dtheta.deta(mu, link = .link, earg = .earg)^2) # w cancel
+ if (M > 1 && ! .onedpar) {
+ dpar = rep(as.numeric(NA), len = M)
+ temp87 = cbind(temp87)
+ nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
+ for(ii in 1:M)
+ dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
+ if (is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
+ names(dpar) = dimnames(y)[[2]]
+ } else {
+ dpar = sum(temp87) / (length(mu) - ncol(x))
+ }
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 1
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$expected = TRUE
+ misc$link = rep( .link, length = M)
+ names(misc$link) = if (M > 1) dn2 else "mu"
+ misc$imethod = .imethod
+
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] = .earg
+ }), list( .dispersion = dispersion, .imethod=imethod,
+ .estimated.dispersion = estimated.dispersion,
+ .onedpar = onedpar, .link = link, .earg = earg ))),
+
+ linkfun = eval(substitute( function(mu, extra = NULL) {
+ theta2eta(mu, link = .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals) w*(y/mu - 1) else {
+ sum(w * dpois(x=y, lambda=mu, log = TRUE))
+ }
+ },
+ vfamily = "poissonff",
+ deriv = eval(substitute(expression({
+ if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ w * (y - mu)
+ } else {
+ lambda <- mu
+ dl.dlambda <- (y-lambda) / lambda
+ dlambda.deta <- dtheta.deta(theta = lambda,
+ link = .link, earg = .earg)
+ w * dl.dlambda * dlambda.deta
+ }
+ }), list( .link = link, .earg = earg ))),
+
+ weight = eval(substitute(expression({
+ if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ tmp600 = mu
+ tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
+ w * tmp600
+ } else {
+ d2l.dlambda2 = 1 / lambda
+ d2lambda.deta2=d2theta.deta2(theta = lambda,link= .link,earg = .earg)
+ w * dlambda.deta^2 * d2l.dlambda2
+ }
+ }), list( .link = link, .earg = earg ))))
}
+
quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
parallel = FALSE, zero = NULL) {
dispersion = 0 # Estimated; this is the only difference with binomialff()
@@ -727,7 +754,7 @@ rinv.gaussian = function(n, mu, lambda) {
-poissonqn.control <- function(save.weight=TRUE, ...)
+poissonqn.control <- function(save.weight = TRUE, ...)
{
list(save.weight=save.weight)
}
@@ -751,7 +778,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu"),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints)
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -886,7 +913,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
lmean = as.character(substitute(lmean))
if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
ldispersion = as.character(substitute(ldispersion))
- if (!is.Numeric(idispersion, posit = TRUE))
+ if (!is.Numeric(idispersion, positive = TRUE))
stop("bad input for 'idispersion'")
if (!is.list(emean)) emean = list()
if (!is.list(edispersion)) edispersion = list()
@@ -894,8 +921,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
new("vglmff",
blurb = c("Double exponential Poisson distribution\n\n",
"Link: ",
- namesof("mean", lmean, earg= emean), ", ",
- namesof("dispersion", lmean, earg= edispersion), "\n",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", lmean, earg = edispersion), "\n",
"Mean: ", "mean\n",
"Variance: mean / dispersion"),
constraints = eval(substitute(expression({
@@ -917,14 +944,14 @@ poissonqn.control <- function(save.weight=TRUE, ...)
earg = .edispersion, short = TRUE))
init.mu = pmax(y, 1/8)
if (!length(etastart))
- etastart = cbind(theta2eta(init.mu, link = .lmean,earg= .emean),
+ etastart = cbind(theta2eta(init.mu, link = .lmean,earg = .emean),
theta2eta(rep( .idispersion, len = n),
link = .ldispersion, earg = .edispersion))
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion,
.idispersion = idispersion ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], link = .lmean, earg = .emean)
+ eta2theta(eta[, 1], link = .lmean, earg = .emean)
}, list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion ))),
last = eval(substitute(expression({
@@ -935,8 +962,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.ldispersion = ldispersion, .edispersion = edispersion ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[,1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
+ lambda = eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper = eta2theta(eta[, 2], link = .ldispersion, earg = .edispersion)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(w*(0.5*log(Disper) + Disper*(y-lambda) + Disper*y*log(lambda)))
@@ -945,8 +972,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.ldispersion = ldispersion, .edispersion = edispersion ))),
vfamily = "dexppoisson",
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[,1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
+ lambda = eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper = eta2theta(eta[, 2], link = .ldispersion, earg = .edispersion)
dl.dlambda = Disper * (y / lambda - 1)
dl.dDisper = y * log(lambda) + y - lambda + 0.5 / Disper
dlambda.deta = dtheta.deta(theta = lambda, link = .lmean, earg = .emean)
@@ -959,8 +986,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), nrow=n, ncol=2) # diagonal
usethis.lambda = pmax(lambda, .Machine$double.eps / 10000)
- wz[,iam(1,1,M)] = (Disper / usethis.lambda) * dlambda.deta^2
- wz[,iam(2,2,M)] = (0.5 / Disper^2) * dDisper.deta^2
+ wz[,iam(1, 1,M)] = (Disper / usethis.lambda) * dlambda.deta^2
+ wz[,iam(2, 2,M)] = (0.5 / Disper^2) * dDisper.deta^2
c(w) * wz
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion ))))
@@ -977,7 +1004,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
lmean = as.character(substitute(lmean))
if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
ldispersion = as.character(substitute(ldispersion))
- if (!is.Numeric(idispersion, posit = TRUE))
+ if (!is.Numeric(idispersion, positive = TRUE))
stop("bad input for 'idispersion'")
if (!is.list(emean)) emean = list()
if (!is.list(edispersion)) edispersion = list()
@@ -985,8 +1012,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
new("vglmff",
blurb = c("Double Exponential Binomial distribution\n\n",
"Link: ",
- namesof("mean", lmean, earg= emean), ", ",
- namesof("dispersion", lmean, earg= edispersion), "\n",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", lmean, earg = edispersion), "\n",
"Mean: ", "mean\n"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
@@ -1026,8 +1053,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
y = round(y)
- nvec = y[,1] + y[,2]
- y = ifelse(nvec > 0, y[,1] / nvec, 0)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
w = w * nvec
init.mu =
mustart = (0.5 + nvec * y) / (1 + nvec)
@@ -1049,14 +1076,14 @@ poissonqn.control <- function(save.weight=TRUE, ...)
namesof("dispersion", link = .ldispersion,
earg = .edispersion, short = TRUE))
if (!length(etastart))
- etastart = cbind(theta2eta(init.mu, link = .lmean,earg= .emean),
+ etastart = cbind(theta2eta(init.mu, link = .lmean,earg = .emean),
theta2eta(rep( .idispersion, len = n),
link = .ldispersion, earg = .edispersion))
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion,
.idispersion = idispersion ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], link = .lmean, earg = .emean)
+ eta2theta(eta[, 1], link = .lmean, earg = .emean)
}, list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion ))),
last = eval(substitute(expression({
@@ -1067,8 +1094,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.ldispersion = ldispersion, .edispersion = edispersion ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta[,1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
+ prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper = eta2theta(eta[, 2], link = .ldispersion, earg = .edispersion)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
@@ -1084,8 +1111,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.ldispersion = ldispersion, .edispersion = edispersion ))),
vfamily = "dexpbinomial",
deriv = eval(substitute(expression({
- prob = eta2theta(eta[,1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[,2], link = .ldispersion, earg = .edispersion)
+ prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper = eta2theta(eta[, 2], link = .ldispersion, earg = .edispersion)
temp1 = y * log(ifelse(y > 0, y, 1)) # y*log(y)
temp2 = (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
temp3 = prob * (1.0-prob)
@@ -1102,8 +1129,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
.ldispersion = ldispersion, .edispersion = edispersion ))),
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), nrow=n, ncol=2) # diagonal
- wz[,iam(1,1,M)] = w * (Disper / temp3) * dprob.deta^2
- wz[,iam(2,2,M)] = (0.5 / Disper^2) * dDisper.deta^2
+ wz[,iam(1, 1,M)] = w * (Disper / temp3) * dprob.deta^2
+ wz[,iam(2, 2,M)] = (0.5 / Disper^2) * dDisper.deta^2
wz
}), list( .lmean = lmean, .emean = emean,
.ldispersion = ldispersion, .edispersion = edispersion ))))
@@ -1118,7 +1145,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
if (mode(link )!= "character" && mode(link )!= "name")
link <- as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(smallno, positive=TRUE, allow = 1) || smallno > 1e-4)
+ if (!is.Numeric(smallno, positive = TRUE, allowable.length = 1) || smallno > 1e-4)
stop("bad input for 'smallno'")
if (is.logical(parallel) && !parallel)
stop("'parallel' must be TRUE")
@@ -1134,13 +1161,13 @@ poissonqn.control <- function(save.weight=TRUE, ...)
blurb = c("Matched binomial model (intercepts fitted)\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- intercept.apply=TRUE)
+ constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints,
+ intercept.apply = TRUE)
constraints[[extra$mvar]] <- diag(M)
specialCM = list(a = vector("list", M-1))
for(ii in 1:(M-1)) {
- specialCM[[1]][[ii]] = (constraints[[extra$mvar]])[,1+ii,drop = FALSE]
+ specialCM[[1]][[ii]] = (constraints[[extra$mvar]])[, 1+ii,drop = FALSE]
}
names(specialCM) = extra$mvar
}), list( .parallel = parallel ))),
@@ -1166,8 +1193,8 @@ poissonqn.control <- function(save.weight=TRUE, ...)
} else if (NCOL(y) == 2) {
if (any(abs(y - round(y)) > 0.001))
stop("Count data must be integer-valued")
- nvec = y[,1] + y[,2]
- y = ifelse(nvec > 0, y[,1] / nvec, 0)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
w = w * nvec
mustart = (0.5 + nvec * y) / (1 + nvec)
} else
@@ -1177,7 +1204,7 @@ poissonqn.control <- function(save.weight=TRUE, ...)
if (colnames(x)[1] != "(Intercept)") stop("x must have an intercept")
M = CCC = length(temp1[[mvar]]) + (colnames(x)[1] == "(Intercept)")
temp9 = x[,temp1[[mvar]],drop = FALSE]
- temp9 = temp9 * matrix(2:CCC, n, CCC-1, byrow=TRUE)
+ temp9 = temp9 * matrix(2:CCC, n, CCC-1, byrow = TRUE)
temp9 = apply(temp9, 1, max)
temp9[temp9 == 0] = 1
extra$NoMatchedSets = CCC
@@ -1290,7 +1317,7 @@ mypool = function(x, index) {
blurb = c("Matched binomial model (intercepts not fitted)\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M,1), x, .parallel, constraints,
+ constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints,
intercept.apply = FALSE)
}), list( .parallel = parallel ))),
initialize = eval(substitute(expression({
@@ -1313,15 +1340,15 @@ mypool = function(x, index) {
} else if (NCOL(y) == 2) {
if (any(abs(y - round(y)) > 0.001))
stop("Count data must be integer-valued")
- nvec = y[,1] + y[,2]
- y = ifelse(nvec > 0, y[,1] / nvec, 0)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
w = w * nvec
mustart = (0.5 + nvec * y) / (1 + nvec)
} else
stop("Response not of the right form")
if (!length(etastart))
- etastart <- theta2eta(mustart, link= "logit", earg= list())
+ etastart <- theta2eta(mustart, link= "logit", earg = list())
temp1 = attr(x, "assign")
mvar = extra$mvar
@@ -1484,8 +1511,8 @@ mypool = function(x, index) {
if (any(abs(y - round(y)) > 1.0e-8))
stop("Count data must be integer-valued")
y = round(y)
- nvec = y[,1] + y[,2]
- y = ifelse(nvec > 0, y[,1] / nvec, 0)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
w = w * nvec
if (!length(mustart) && !length(etastart))
mustart = (0.5 + nvec * y) / (1 + nvec)
diff --git a/R/family.loglin.R b/R/family.loglin.R
index d1ed8dd..0e25e65 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -1,11 +1,11 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
-loglinb2 <- function(exchangeable = FALSE, zero = NULL)
+ loglinb2 <- function(exchangeable = FALSE, zero = NULL)
{
new("vglmff",
@@ -100,7 +100,7 @@ loglinb2 <- function(exchangeable = FALSE, zero = NULL)
}
-loglinb3 <- function(exchangeable = FALSE, zero = NULL)
+ loglinb3 <- function(exchangeable = FALSE, zero = NULL)
{
new("vglmff",
@@ -117,8 +117,10 @@ loglinb3 <- function(exchangeable = FALSE, zero = NULL)
initialize = expression({
y <- as.matrix(y)
predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
+
if (ncol(y) != 3)
stop("ncol(y) must be = 3")
+
extra$my.expression <- expression({
u1 <- eta[,1]
u2 <- eta[,2]
@@ -130,6 +132,8 @@ loglinb3 <- function(exchangeable = FALSE, zero = NULL)
exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
exp(u1 + u2 + u3 + u12 + u13 + u23)
})
+
+
extra$deriv.expression <- expression({
allterms <- exp(u1+u2+u3+u12+u13+u23)
A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
@@ -142,6 +146,8 @@ loglinb3 <- function(exchangeable = FALSE, zero = NULL)
A13 <- exp(u1 + u3 + u13) + allterms
A23 <- exp(u2 + u3 + u23) + allterms
})
+
+
if (length(mustart) + length(etastart) == 0) {
mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
@@ -156,7 +162,7 @@ loglinb3 <- function(exchangeable = FALSE, zero = NULL)
stop("some combinations of the response not realized")
}
}),
- linkinv= function(eta, extra = NULL) {
+ linkinv = function(eta, extra = NULL) {
eval(extra$my.expression)
cbind("000" = 1,
"001" = exp(u3),
diff --git a/R/family.math.R b/R/family.math.R
index a893a78..cef2156 100644
--- a/R/family.math.R
+++ b/R/family.math.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 72789d8..9dae6f3 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -20,7 +20,7 @@ mix2normal1 = function(lphi = "logit",
ephi = list(), emu1 = list(), emu2 = list(),
esd1 = list(), esd2 = list(),
iphi=0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
- qmu=c(0.2, 0.8),
+ qmu = c(0.2, 0.8),
equalsd = TRUE,
nsimEIM = 100,
zero = 1)
@@ -31,18 +31,23 @@ mix2normal1 = function(lphi = "logit",
lmu = as.character(substitute(lmu))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(qmu, allow=2, positive = TRUE) || any(qmu >= 1))
- stop("bad input for argument 'qmu'")
- if (length(iphi) && (!is.Numeric(iphi, allow = 1, positive = TRUE) || iphi>= 1))
+ if (!is.Numeric(qmu, allowable.length = 2,
+ positive = TRUE) ||
+ any(qmu >= 1))
+ stop("bad input for argument 'qmu'")
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1,
+ positive = TRUE) ||
+ iphi>= 1))
stop("bad input for argument 'iphi'")
if (length(imu1) && !is.Numeric(imu1))
- stop("bad input for argument 'imu1'")
+ stop("bad input for argument 'imu1'")
if (length(imu2) && !is.Numeric(imu2))
- stop("bad input for argument 'imu2'")
+ stop("bad input for argument 'imu2'")
if (length(isd1) && !is.Numeric(isd1, positive = TRUE))
- stop("bad input for argument 'isd1'")
+ stop("bad input for argument 'isd1'")
if (length(isd2) && !is.Numeric(isd2, positive = TRUE))
- stop("bad input for argument 'isd2'")
+ stop("bad input for argument 'isd2'")
if (!is.list(ephi)) ephi = list()
if (!is.list(emu1)) emu1 = list()
if (!is.list(emu2)) emu2 = list()
@@ -50,8 +55,10 @@ mix2normal1 = function(lphi = "logit",
if (!is.list(esd2)) esd2 = list()
if (!is.logical(equalsd) || length(equalsd) != 1)
stop("bad input for argument 'equalsd'")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("'nsimEIM' should be an integer greater than 10")
new("vglmff",
blurb = c("Mixture of two univariate normals\n\n",
@@ -65,9 +72,9 @@ mix2normal1 = function(lphi = "logit",
"Variance: phi*sd1^2 + (1-phi)*sd2^2 + phi*(1-phi)*(mu1-mu2)^2"),
constraints = eval(substitute(expression({
constraints = cm.vgam(rbind(diag(4), c(0,0,1,0)), x, .equalsd,
- constraints, int = TRUE)
+ constraints, intercept.apply = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero, .equalsd=equalsd))),
+ }), list( .zero=zero, .equalsd = equalsd ))),
initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
@@ -78,10 +85,10 @@ mix2normal1 = function(lphi = "logit",
namesof("mu2", .lmu, earg = .emu2, tag = FALSE),
namesof("sd2", .lsd, earg = .esd2, tag = FALSE))
if (!length(etastart)) {
- qy = quantile(y, prob= .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length=n)
- init.mu1 = rep(if(length(.imu1)) .imu1 else qy[1], length=n)
- init.mu2 = rep(if(length(.imu2)) .imu2 else qy[2], length=n)
+ qy = quantile(y, prob = .qmu)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.mu1 = rep(if(length(.imu1)) .imu1 else qy[1], length = n)
+ init.mu2 = rep(if(length(.imu2)) .imu2 else qy[2], length = n)
ind.1 = if (init.mu1[1] < init.mu2[1]) 1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
ind.2 = if (init.mu1[1] < init.mu2[1]) round(n* init.phi[1]):n else
@@ -217,25 +224,32 @@ mix2poisson.control <- function(trace = TRUE, ...)
mix2poisson = function(lphi = "logit", llambda = "loge",
ephi = list(), el1 = list(), el2 = list(),
- iphi=0.5, il1 = NULL, il2 = NULL,
- qmu=c(0.2, 0.8), nsimEIM = 100, zero = 1)
+ iphi = 0.5, il1 = NULL, il2 = NULL,
+ qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
{
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (!is.Numeric(qmu, allow=2, positive = TRUE) || any(qmu >= 1))
+
+ if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+ any(qmu >= 1))
stop("bad input for argument 'qmu'")
- if (length(iphi) && (!is.Numeric(iphi, allow = 1, positive = TRUE) || iphi>= 1))
- stop("bad input for argument 'iphi'")
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
+ iphi >= 1))
+ stop("bad input for argument 'iphi'")
if (length(il1) && !is.Numeric(il1))
- stop("bad input for argument 'il1'")
+ stop("bad input for argument 'il1'")
if (length(il2) && !is.Numeric(il2))
- stop("bad input for argument 'il2'")
+ stop("bad input for argument 'il2'")
+
if (!is.list(ephi)) ephi = list()
if (!is.list(el1)) el1 = list()
if (!is.list(el2)) el2 = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
stop("'nsimEIM' should be an integer greater than 10")
new("vglmff",
@@ -256,9 +270,9 @@ mix2poisson = function(lphi = "logit", llambda = "loge",
namesof("lambda2", .llambda, earg = .el2, tag = FALSE))
if (!length(etastart)) {
qy = quantile(y, prob= .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length=n)
- init.lambda1 = rep(if(length(.il1)) .il1 else qy[1], length=n)
- init.lambda2 = rep(if(length(.il2)) .il2 else qy[2], length=n)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 = rep(if(length(.il1)) .il1 else qy[1], length = n)
+ init.lambda2 = rep(if(length(.il2)) .il2 else qy[2], length = n)
if (!length(etastart))
etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
theta2eta(init.lambda1, .llambda, earg = .el1),
@@ -372,28 +386,33 @@ mix2exp.control <- function(trace = TRUE, ...) {
mix2exp = function(lphi = "logit", llambda = "loge",
ephi = list(), el1 = list(), el2 = list(),
iphi=0.5, il1 = NULL, il2 = NULL,
- qmu=c(0.8, 0.2), nsimEIM = 100, zero = 1)
+ qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
{
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (!is.Numeric(qmu, allow=2, positive = TRUE) || any(qmu >= 1))
- stop("bad input for argument 'qmu'")
- if (length(iphi) && (!is.Numeric(iphi, allow = 1, positive = TRUE) ||
+
+ if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+ any(qmu >= 1))
+ stop("bad input for argument 'qmu'")
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
iphi >= 1))
- stop("bad input for argument 'iphi'")
+ stop("bad input for argument 'iphi'")
if (length(il1) && !is.Numeric(il1))
- stop("bad input for argument 'il1'")
+ stop("bad input for argument 'il1'")
if (length(il2) && !is.Numeric(il2))
- stop("bad input for argument 'il2'")
+ stop("bad input for argument 'il2'")
if (!is.list(ephi)) ephi = list()
- if (!is.list(el1)) el1 = list()
- if (!is.list(el2)) el2 = list()
+ if (!is.list(el1)) el1 = list()
+ if (!is.list(el2)) el2 = list()
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("'nsimEIM' should be an integer greater than 10")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
new("vglmff",
blurb = c("Mixture of two univariate exponentials\n\n",
@@ -402,9 +421,11 @@ mix2exp = function(lphi = "logit", llambda = "loge",
namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
"Mean: phi/lambda1 + (1-phi)/lambda2\n"),
+
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.zero=zero ))),
+
initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or one-column matrix")
@@ -413,9 +434,9 @@ mix2exp = function(lphi = "logit", llambda = "loge",
namesof("lambda2", .llambda, earg = .el2,tag = FALSE))
if (!length(etastart)) {
qy = quantile(y, prob= .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length=n)
- init.lambda1 = rep(if(length(.il1)) .il1 else 1/qy[1], length=n)
- init.lambda2 = rep(if(length(.il2)) .il2 else 1/qy[2], length=n)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 = rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
+ init.lambda2 = rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
if (!length(etastart))
etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
theta2eta(init.lambda1, .llambda, earg = .el1),
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index 21f897e..fb33a5f 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -29,7 +29,7 @@ subset_lohi <- function(xvec, yvec,
wtvec = rep(1, len = length(xvec))) {
- if (!is.Numeric(prob.x, allow = 2))
+ if (!is.Numeric(prob.x, allowable.length = 2))
stop("argument 'prob.x' must be numeric and of length two")
min.q <- quantile(xvec, probs = prob.x[1] )
@@ -81,39 +81,34 @@ micmen.control <- function(save.weight = TRUE, ...)
-micmen <- function(rpar = 0.001, divisor = 10,
- init1 = NULL, init2 = NULL,
- imethod = 1,
- oim = TRUE,
- link1 = "identity", link2 = "identity",
- firstDeriv = c("nsimEIM", "rpar"),
- earg1 = list(), earg2 = list(),
- prob.x = c(0.15, 0.85),
- nsimEIM = 500,
- dispersion = 0, zero = NULL)
+ micmen <- function(rpar = 0.001, divisor = 10,
+ init1 = NULL, init2 = NULL,
+ imethod = 1,
+ oim = TRUE,
+ link1 = "identity", link2 = "identity",
+ firstDeriv = c("nsimEIM", "rpar"),
+ earg1 = list(), earg2 = list(),
+ prob.x = c(0.15, 0.85),
+ nsimEIM = 500,
+ dispersion = 0, zero = NULL)
{
-
-
-
firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("argument 'imethod' must be integer")
- if (imethod > 3)
- stop("argument 'imethod' must be 1, 2, or 3")
- if (!is.Numeric(prob.x, allow = 2))
+ if (!is.Numeric(prob.x, allowable.length = 2))
stop("argument 'prob.x' must be numeric and of length two")
if (!is.logical(oim) || length(oim) != 1)
stop("argument 'oim' must be single logical")
stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM==round(nsimEIM))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
- stop("'imethod' must be 1 or 2 or 3")
+ stop("'imethod' must be 1 or 2 or 3")
estimated.dispersion <- (dispersion == 0)
@@ -122,6 +117,7 @@ micmen <- function(rpar = 0.001, divisor = 10,
link1 <- as.character(substitute(link1))
if (mode(link2) != "character" && mode(link2) != "name")
link2 <- as.character(substitute(link2))
+
if (!is.list(earg1)) earg1 = list()
if (!is.list(earg2)) earg2 = list()
@@ -133,77 +129,82 @@ micmen <- function(rpar = 0.001, divisor = 10,
namesof("theta2", link2, earg = earg2),
"\n",
"Variance: constant"),
+
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
}), list( .zero = zero))),
+
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
M <- if (is.matrix(y)) ncol(y) else 1
if (residuals) {
if (M > 1) NULL else (y - mu) * sqrt(w)
- } else
+ } else {
rss.vgam(y - mu, w, M = M)
+ }
},
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(Xm2))
- stop("regressor not found")
- if (ncol(as.matrix(Xm2)) != 1)
- stop("regressor not found or is not a vector. Use the ",
- "'form2' argument without an intercept")
- Xm2 <- as.vector(Xm2) # Make sure
- extra$Xm2 <- Xm2 # Needed for @linkinv
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
- predictors.names <-
- c(namesof("theta1", .link1, earg = .earg1, tag = FALSE),
- namesof("theta2", .link2, earg = .earg2, tag = FALSE))
+ if (!length(Xm2))
+ stop("regressor not found")
+ if (ncol(as.matrix(Xm2)) != 1)
+ stop("regressor not found or is not a vector. Use the ",
+ "'form2' argument without an intercept")
+ Xm2 <- as.vector(Xm2) # Make sure
+ extra$Xm2 <- Xm2 # Needed for @linkinv
- if (length(mustart) || length(coefstart))
- stop("cannot handle 'mustart' or 'coefstart'")
+ predictors.names <-
+ c(namesof("theta1", .link1, earg = .earg1, tag = FALSE),
+ namesof("theta2", .link2, earg = .earg2, tag = FALSE))
- if (!length(etastart)) {
- if ( .imethod == 3 ) {
- index0 <- (1:n)[Xm2 <= quantile(Xm2, prob = .prob.x[2] )]
- init1 <- median(y[index0])
- init2 <- median(init1 * Xm2 / y - Xm2)
- }
- if ( .imethod == 1 || .imethod == 2) {
- mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
- type = ifelse( .imethod == 1, "median", "wtmean"),
- wtvec = w)
+ if (length(mustart) || length(coefstart))
+ stop("cannot handle 'mustart' or 'coefstart'")
- mat.x <- with(mysubset, cbind(c(x1bar, x2bar), -c(y1bar, y2bar)))
- theta.temp <- with(mysubset,
- solve(mat.x, c(x1bar * y1bar, x2bar * y2bar)))
- init1 <- theta.temp[1]
- init2 <- theta.temp[2]
+ if (!length(etastart)) {
+ if ( .imethod == 3 ) {
+ index0 <- (1:n)[Xm2 <= quantile(Xm2, prob = .prob.x[2] )]
+ init1 <- median(y[index0])
+ init2 <- median(init1 * Xm2 / y - Xm2)
+ }
+ if ( .imethod == 1 || .imethod == 2) {
+ mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ type = ifelse( .imethod == 1, "median", "wtmean"),
+ wtvec = w)
+ mat.x <- with(mysubset, cbind(c(x1bar, x2bar), -c(y1bar, y2bar)))
+ theta.temp <- with(mysubset,
+ solve(mat.x, c(x1bar * y1bar, x2bar * y2bar)))
+ init1 <- theta.temp[1]
+ init2 <- theta.temp[2]
- }
+ }
- if (length( .init1 )) init1 <- .init1
- if (length( .init2 )) init2 <- .init2
- etastart <- cbind(
- rep(theta2eta(init1, .link1, earg = .earg1), len = n),
- rep(theta2eta(init2, .link2, earg = .earg2), len = n))
- } else {
- stop("cannot handle 'etastart' or 'mustart'")
- }
+ if (length( .init1 )) init1 <- .init1
+ if (length( .init2 )) init2 <- .init2
+ etastart <- cbind(
+ rep(theta2eta(init1, .link1, earg = .earg1), len = n),
+ rep(theta2eta(init2, .link2, earg = .earg2), len = n))
+ } else {
+ stop("cannot handle 'etastart' or 'mustart'")
+ }
}), list( .init1 = init1, .link1 = link1, .earg1 = earg1,
.init2 = init2, .link2 = link2, .earg2 = earg2,
.imethod = imethod,
.prob.x = prob.x ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta1 <- eta2theta(eta[,1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[,2], .link2, earg = .earg2)
- theta1 * extra$Xm2 / (theta2 + extra$Xm2)
+ theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
+ theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ theta1 * extra$Xm2 / (theta2 + extra$Xm2)
}, list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2))),
+
last = eval(substitute(expression({
misc$link <- c(theta1 = .link1, theta2 = .link2)
misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
@@ -217,8 +218,10 @@ micmen <- function(rpar = 0.001, divisor = 10,
dpar <- sum(w * (y - mu)^2) / (n - ncol_X_vlm)
}
misc$dispersion <- dpar
+
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
+
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$firstDeriv <- .firstDeriv
@@ -233,56 +236,60 @@ micmen <- function(rpar = 0.001, divisor = 10,
.oim = oim, .rpar = rpar,
.nsimEIM = nsimEIM,
.estimated.dispersion = estimated.dispersion ))),
+
summary.dispersion = FALSE,
+
vfamily = c("micmen", "vnonlinear"),
+
deriv = eval(substitute(expression({
- theta1 <- eta2theta(eta[,1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[,2], .link2, earg = .earg2)
- dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
- dtheta.deta(theta2, .link2, earg = .earg2))
-
- rpar <- if ( .firstDeriv == "rpar") {
- if (iter > 1) {
- max(rpar / .divisor, 1000 * .Machine$double.eps)
- } else {
- d3 <- deriv3(~ theta1 * Xm2 / (theta2 + Xm2),
- c("theta1", "theta2"), hessian = FALSE)
- .rpar
- }
+ theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
+ theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
+ dtheta.deta(theta2, .link2, earg = .earg2))
+
+ rpar <- if ( .firstDeriv == "rpar") {
+ if (iter > 1) {
+ max(rpar / .divisor, 1000 * .Machine$double.eps)
} else {
+ d3 <- deriv3(~ theta1 * Xm2 / (theta2 + Xm2),
+ c("theta1", "theta2"), hessian = FALSE)
.rpar
}
+ } else {
+ .rpar
+ }
- dmus.dthetas <- if (FALSE) {
- attr(eval(d3), "gradient")
- } else {
- dmu.dtheta1 <- Xm2 / (theta2 + Xm2)
- dmu.dtheta2 <- -theta1 * Xm2 / (Xm2 + theta2)^2
- cbind(dmu.dtheta1, dmu.dtheta2)
- }
+ dmus.dthetas <- if (FALSE) {
+ attr(eval(d3), "gradient")
+ } else {
+ dmu.dtheta1 <- Xm2 / (theta2 + Xm2)
+ dmu.dtheta2 <- -theta1 * Xm2 / (Xm2 + theta2)^2
+ cbind(dmu.dtheta1, dmu.dtheta2)
+ }
- myderiv <- if ( .firstDeriv == "rpar") {
- if (TRUE) {
- index <- iam(NA, NA, M = M, both = TRUE)
- temp200809 <- dmus.dthetas * dthetas.detas
- if (M > 1)
- temp200809[, 2:M] <- temp200809[, 2:M] + sqrt(rpar)
- w * (y - mu) * temp200809
- } else {
- w * (y - mu) *
- cbind(dmus.dthetas[,1] * dthetas.detas[,1],
- dmus.dthetas[,2] * dthetas.detas[,2] + sqrt(rpar))
- }
+ myderiv <- if ( .firstDeriv == "rpar") {
+ if (TRUE) {
+ index <- iam(NA, NA, M = M, both = TRUE)
+ temp200809 <- dmus.dthetas * dthetas.detas
+ if (M > 1)
+ temp200809[, 2:M] <- temp200809[, 2:M] + sqrt(rpar)
+ w * (y - mu) * temp200809
} else {
- temp20101111 <- dmus.dthetas * dthetas.detas
- w * (y - mu) * temp20101111
+ w * (y - mu) *
+ cbind(dmus.dthetas[, 1] * dthetas.detas[, 1],
+ dmus.dthetas[, 2] * dthetas.detas[, 2] + sqrt(rpar))
}
+ } else {
+ temp20101111 <- dmus.dthetas * dthetas.detas
+ w * (y - mu) * temp20101111
+ }
- myderiv
+ myderiv
}), list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2,
.firstDeriv = firstDeriv,
.rpar = rpar, .divisor = divisor ))),
+
weight = eval(substitute(expression({
if ( .oim ) {
wz <- matrix(0, n, dimm(M))
@@ -300,10 +307,10 @@ micmen <- function(rpar = 0.001, divisor = 10,
if (M > 1)
wz[, 2:M] <- wz[, 2:M] + rpar
} else {
- wz <- cbind(( dmus.dthetas[,1] * dthetas.detas[,1])^2,
- ( dmus.dthetas[,2] * dthetas.detas[,2])^2 + rpar,
- dmus.dthetas[,1] * dmus.dthetas[,2] *
- dthetas.detas[,1] * dthetas.detas[,2])
+ wz <- cbind(( dmus.dthetas[, 1] * dthetas.detas[, 1])^2,
+ ( dmus.dthetas[, 2] * dthetas.detas[, 2])^2 + rpar,
+ dmus.dthetas[, 1] * dmus.dthetas[, 2] *
+ dthetas.detas[, 1] * dthetas.detas[, 2])
}
} else {
run.varcov <- 0
@@ -315,7 +322,8 @@ micmen <- function(rpar = 0.001, divisor = 10,
ysim <- theta1 * Xm2 / (theta2 + Xm2) + rnorm(n, sd = mysigma)
temp3 <- (ysim - mu) * dmus.dthetas * dthetas.detas
run.varcov <- run.varcov +
- temp3[, index0$row.index] * temp3[, index0$col.index]
+ temp3[, index0$row.index] *
+ temp3[, index0$col.index]
}
run.varcov <- run.varcov / .nsimEIM
@@ -361,7 +369,7 @@ skira.control <- function(save.weight = TRUE, ...)
firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
- if (!is.Numeric(prob.x, allow = 2))
+ if (!is.Numeric(prob.x, allowable.length = 2))
stop("argument 'prob.x' must be numeric and of length two")
estimated.dispersion <- dispersion == 0
@@ -370,7 +378,7 @@ skira.control <- function(save.weight = TRUE, ...)
if (mode(link2) != "character" && mode(link2) != "name")
link2 <- as.character(substitute(link2))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("argument 'imethod' must be integer")
if (imethod > 5)
stop("argument 'imethod' must be 1, 2, 3, 4 or 5")
diff --git a/R/family.normal.R b/R/family.normal.R
index 2304f11..a73b788 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -12,19 +12,19 @@
VGAM.weights.function = function(w, M, n) {
- ncolw = ncol(as.matrix(w))
- if (ncolw == 1) {
- wz = matrix(w, nrow=n, ncol=M) # w_i * diag(M)
- } else if (ncolw == M) {
- wz = as.matrix(w)
- } else if (ncolw < M && M > 1) {
- stop("ambiguous input for 'weights'")
- } else if (ncolw > M*(M+1)/2) {
- stop("too many columns")
- } else {
- wz = as.matrix(w)
- }
- wz
+ ncolw = ncol(as.matrix(w))
+ if (ncolw == 1) {
+ wz = matrix(w, nrow=n, ncol=M) # w_i * diag(M)
+ } else if (ncolw == M) {
+ wz = as.matrix(w)
+ } else if (ncolw < M && M > 1) {
+ stop("ambiguous input for 'weights'")
+ } else if (ncolw > M*(M+1)/2) {
+ stop("too many columns")
+ } else {
+ wz = as.matrix(w)
+ }
+ wz
}
@@ -40,7 +40,7 @@ VGAM.weights.function = function(w, M, n) {
gaussianff = function(dispersion = 0, parallel = FALSE, zero = NULL)
{
- if (!is.Numeric(dispersion, allow = 1) || dispersion < 0)
+ if (!is.Numeric(dispersion, allowable.length = 1) || dispersion < 0)
stop("bad input for argument 'dispersion'")
estimated.dispersion = dispersion == 0
@@ -48,7 +48,7 @@ VGAM.weights.function = function(w, M, n) {
blurb = c("Vector linear/additive model\n",
"Links: identity for Y1,...,YM"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -67,7 +67,7 @@ VGAM.weights.function = function(w, M, n) {
},
initialize = eval(substitute(expression({
if (is.R())
- assign("CQO.FastAlgorithm", TRUE, envir = VGAM:::VGAMenv) else
+ assign("CQO.FastAlgorithm", TRUE, envir = VGAM::VGAMenv) else
CQO.FastAlgorithm <<- TRUE
if (any(function.name == c("cqo","cao")) &&
(length( .zero ) || (is.logical( .parallel ) && .parallel )))
@@ -99,8 +99,8 @@ VGAM.weights.function = function(w, M, n) {
names(misc$link) = predictors.names
if (is.R()) {
- if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+ if (exists("CQO.FastAlgorithm", envir = VGAM::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM::VGAMenv)
} else {
while (exists("CQO.FastAlgorithm"))
remove("CQO.FastAlgorithm")
@@ -165,16 +165,16 @@ VGAM.weights.function = function(w, M, n) {
dposnorm = function(x, mean = 0, sd = 1, log = FALSE) {
log.arg = log
rm(log)
- if (!is.logical(log.arg) || length(log.arg)!=1)
+ if (!is.logical(log.arg) || length(log.arg) != 1)
stop("bad input for argument 'log'")
L = max(length(x), length(mean), length(sd))
x = rep(x, len = L); mean = rep(mean, len = L); sd = rep(sd, len = L);
if (log.arg) {
- ifelse(x < 0, log(0), dnorm(x, m=mean, sd = sd, log = TRUE) -
- pnorm(mean/sd, log = TRUE))
+ ifelse(x < 0, log(0), dnorm(x, mean = mean, sd = sd, log = TRUE) -
+ pnorm(mean / sd, log.p = TRUE))
} else {
- ifelse(x < 0, 0, dnorm(x = x, me=mean, sd = sd) / pnorm(mean/sd))
+ ifelse(x < 0, 0, dnorm(x = x, mean = mean, sd = sd) / pnorm(mean / sd))
}
}
@@ -188,20 +188,20 @@ pposnorm = function(q, mean = 0, sd = 1) {
qposnorm = function(p, mean = 0, sd = 1) {
- if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
- stop("bad input for argument 'p'")
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
+ stop("bad input for argument 'p'")
qnorm(p = p + (1-p) * pnorm(0, mean = mean, sd = sd),
mean = mean, sd = sd)
}
rposnorm = function(n, mean = 0, sd = 1) {
- if (!is.Numeric(n, integ = TRUE, posit = TRUE))
- stop("bad input for argument 'n'")
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'n'")
mean = rep(mean, length = n)
sd = rep(sd, length = n)
- qnorm(p = runif(n, min = pnorm(0, m = mean, sd = sd)),
- m = mean, sd = sd)
+ qnorm(p = runif(n, min = pnorm(0, mean = mean, sd = sd)),
+ mean = mean, sd = sd)
}
@@ -225,16 +225,16 @@ rposnorm = function(n, mean = 0, sd = 1) {
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(isd) && !is.Numeric(isd, posit = TRUE))
+ if (length(isd) && !is.Numeric(isd, positive = TRUE))
stop("bad input for argument 'isd'")
if (!is.list(emean)) emean = list()
if (!is.list(esd)) esd = list()
if (length(nsimEIM))
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 10)
stop("argument 'nsimEIM' should be an integer greater than 10")
new("vglmff",
@@ -348,28 +348,32 @@ rposnorm = function(n, mean = 0, sd = 1) {
dbetanorm = function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
- log.arg = log
- rm(log)
- if (!is.logical(log.arg) || length(log.arg)!=1)
- stop("bad input for argument 'log'")
- ans =
- if (is.R() && log.arg) {
- dnorm(x = x, mean = mean, sd = sd, log = TRUE) +
- (shape1-1) * pnorm(q = x, mean = mean, sd = sd, log = TRUE) +
- (shape2-1) * pnorm(q = x, mean = mean, sd = sd, log = TRUE,
- lower = FALSE) -
- lbeta(shape1, shape2)
- } else {
- dnorm(x = x, mean = mean, sd = sd) *
- pnorm(q = x, mean = mean, sd = sd)^(shape1-1) *
- pnorm(q = x, mean = mean, sd = sd,
- lower = FALSE)^(shape2-1) / beta(shape1, shape2)
- }
- if (!is.R() && log.arg) ans = log(ans)
- ans
+ log.arg = log
+ rm(log)
+ if (!is.logical(log.arg) ||
+ length(log.arg) != 1)
+ stop("bad input for argument 'log'")
+
+ ans =
+ if (log.arg) {
+ dnorm(x = x, mean = mean, sd = sd, log = TRUE) +
+ (shape1-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE) +
+ (shape2-1) * pnorm(q = x, mean = mean, sd = sd, log.p = TRUE,
+ lower.tail = FALSE) -
+ lbeta(shape1, shape2)
+ } else {
+ dnorm(x = x, mean = mean, sd = sd) *
+ pnorm(q = x, mean = mean, sd = sd)^(shape1-1) *
+ pnorm(q = x, mean = mean, sd = sd,
+ lower.tail = FALSE)^(shape2-1) / beta(shape1, shape2)
+ }
+
+ ans
}
+
+
pbetanorm = function(q, shape1, shape2, mean = 0, sd = 1,
lower.tail = TRUE, log.p = FALSE) {
pbeta(q=pnorm(q = q, mean = mean, sd = sd),
@@ -379,17 +383,17 @@ pbetanorm = function(q, shape1, shape2, mean = 0, sd = 1,
qbetanorm = function(p, shape1, shape2, mean = 0, sd = 1) {
- if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
qnorm(p=qbeta(p=p, shape1=shape1, shape2=shape2), mean = mean, sd = sd)
}
rbetanorm = function(n, shape1, shape2, mean = 0, sd = 1) {
- if (!is.Numeric(n, integ = TRUE, posit = TRUE))
- stop("bad input for argument 'n'")
- qnorm(p=qbeta(p=runif(n), shape1=shape1, shape2=shape2),
- mean = mean, sd = sd)
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'n'")
+ qnorm(p = qbeta(p = runif(n), shape1 = shape1, shape2 = shape2),
+ mean = mean, sd = sd)
}
@@ -400,10 +404,13 @@ dtikuv = function(x, d, mean = 0, sigma = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(d, allow = 1) || max(d) >= 2)
- stop("bad input for argument 'd'")
+ if (!is.Numeric(d, allowable.length = 1) ||
+ max(d) >= 2)
+ stop("bad input for argument 'd'")
+
L = max(length(x), length(mean), length(sigma))
- x = rep(x, len = L); mean = rep(mean, len = L); sigma = rep(sigma, len = L);
+ x = rep(x, len = L); mean = rep(mean, len = L);
+ sigma = rep(sigma, len = L);
hh = 2 - d
KK = 1 / (1 + 1/hh + 0.75/hh^2)
if (log.arg) {
@@ -417,10 +424,13 @@ dtikuv = function(x, d, mean = 0, sigma = 1, log = FALSE) {
ptikuv = function(q, d, mean = 0, sigma=1) {
- if (!is.Numeric(d, allow = 1) || max(d) >= 2)
- stop("bad input for argument 'd'")
+ if (!is.Numeric(d, allowable.length = 1) ||
+ max(d) >= 2)
+ stop("bad input for argument 'd'")
+
L = max(length(q), length(mean), length(sigma))
- q = rep(q, len = L); mean = rep(mean, len = L); sigma = rep(sigma, len = L);
+ q = rep(q, len = L); mean = rep(mean, len = L);
+ sigma = rep(sigma, len = L);
zedd1 = 0.5 * ((q - mean) / sigma)^2
ans = q*0 + 0.5
hh = 2 - d
@@ -432,17 +442,17 @@ ptikuv = function(q, d, mean = 0, sigma=1) {
gamma(2.5) * (1 - pgamma(zedd1[lhs], 2.5)) / hh^2)
}
if (any(rhs <- q > mean)) {
- ans[rhs] = 1.0 - Recall(q=(2*mean[rhs]-q[rhs]), d=d,
- mean = mean[rhs], sigma=sigma[rhs])
+ ans[rhs] = 1.0 - Recall(q = (2*mean[rhs] - q[rhs]), d = d,
+ mean = mean[rhs], sigma = sigma[rhs])
}
ans
}
qtikuv = function(p, d, mean = 0, sigma = 1, ...) {
- if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(d, allow = 1) || max(d) >= 2)
+ if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
if (!is.Numeric(mean))
stop("bad input for argument 'mean'")
@@ -452,60 +462,61 @@ qtikuv = function(p, d, mean = 0, sigma = 1, ...) {
p = rep(p, len = L); mean = rep(mean, len = L); sigma = rep(sigma, len = L);
ans = rep(0.0, len = L)
myfun = function(x, d, mean = 0, sigma = 1, p)
- ptikuv(q = x, d=d, mean = mean, sigma=sigma) - p
+ ptikuv(q = x, d = d, mean = mean, sigma = sigma) - p
for(i in 1:L) {
Lower = ifelse(p[i] <= 0.5, mean[i] - 3 * sigma[i], mean[i])
- while (ptikuv(q = Lower, d=d, mean = mean[i], sigma=sigma[i]) > p[i])
+ while (ptikuv(q = Lower, d = d, mean = mean[i], sigma = sigma[i]) > p[i])
Lower = Lower - sigma[i]
Upper = ifelse(p[i] >= 0.5, mean[i] + 3 * sigma[i], mean[i])
- while (ptikuv(q = Upper, d=d, mean = mean[i], sigma=sigma[i]) < p[i])
+ while (ptikuv(q = Upper, d = d, mean = mean[i], sigma = sigma[i]) < p[i])
Upper = Upper + sigma[i]
- ans[i] = uniroot(f=myfun, lower = Lower, upper = Upper, d=d, p=p[i],
- mean = mean[i], sigma=sigma[i], ...)$root
+ ans[i] = uniroot(f=myfun, lower = Lower, upper = Upper, d = d, p=p[i],
+ mean = mean[i], sigma = sigma[i], ...)$root
}
ans
}
-rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE))
- stop("bad input for argument 'n'")
- if (!is.Numeric(d, allow = 1) || max(d) >= 2)
- stop("bad input for argument 'd'")
- if (!is.Numeric(mean, allow = 1))
- stop("bad input for argument 'mean'")
- if (!is.Numeric(sigma, allow = 1))
- stop("bad input for argument 'sigma'")
- if (!is.Numeric(Smallno, posit = TRUE, allow = 1) ||
- Smallno > 0.01 ||
- Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len = n)
-
- ptr1 = 1; ptr2 = 0
- hh = 2 - d
- KK = 1 / (1 + 1/hh + 0.75/hh^2)
- ymax = ifelse(hh < 2,
- dtikuv(x=mean + sigma*sqrt(4 - 2*hh),
- d=d, m=mean, s=sigma),
- KK / (sqrt(2 * pi) * sigma))
- while (ptr2 < n) {
- Lower = mean - 5 * sigma
- while (ptikuv(q = Lower, d=d, mean = mean, sigma=sigma) > Smallno)
- Lower = Lower - sigma
- Upper = mean + 5 * sigma
- while (ptikuv(q = Upper, d=d, mean = mean, sigma=sigma) < 1-Smallno)
- Upper = Upper + sigma
- x = runif(2*n, min = Lower, max = Upper)
- index = runif(2*n, max=ymax) < dtikuv(x,d=d,m=mean,s=sigma)
- sindex = sum(index)
- if (sindex) {
- ptr2 = min(n, ptr1 + sindex - 1)
- ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
- }
+rtikuv = function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
+ stop("bad input for argument 'd'")
+ if (!is.Numeric(mean, allowable.length = 1))
+ stop("bad input for argument 'mean'")
+ if (!is.Numeric(sigma, allowable.length = 1))
+ stop("bad input for argument 'sigma'")
+ if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+ Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument 'Smallno'")
+ ans = rep(0.0, len = n)
+
+ ptr1 = 1; ptr2 = 0
+ hh = 2 - d
+ KK = 1 / (1 + 1/hh + 0.75/hh^2)
+ ymax = ifelse(hh < 2,
+ dtikuv(x = mean + sigma*sqrt(4 - 2*hh),
+ d = d, mean = mean, sigma = sigma),
+ KK / (sqrt(2 * pi) * sigma))
+ while (ptr2 < n) {
+ Lower = mean - 5 * sigma
+ while (ptikuv(q = Lower, d = d, mean = mean, sigma = sigma) > Smallno)
+ Lower = Lower - sigma
+ Upper = mean + 5 * sigma
+ while (ptikuv(q = Upper, d = d, mean = mean, sigma = sigma) < 1-Smallno)
+ Upper = Upper + sigma
+ x = runif(2*n, min = Lower, max = Upper)
+ index = runif(2*n, max = ymax) <
+ dtikuv(x, d = d, mean = mean, sigma = sigma)
+ sindex = sum(index)
+ if (sindex) {
+ ptr2 = min(n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
}
- ans
+ }
+ ans
}
@@ -519,10 +530,10 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
lmean = as.character(substitute(lmean))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
- if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ if (length(zero) && (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
max(zero) > 2))
stop("bad input for argument 'zero'")
- if (!is.Numeric(d, allow = 1) || max(d) >= 2)
+ if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
if (!is.list(emean)) emean = list()
if (!is.list(esigma)) esigma = list()
@@ -555,7 +566,7 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }),list( .lmean = lmean, .lsigma=lsigma, .isigma=isigma, .d=d,
+ }),list( .lmean = lmean, .lsigma=lsigma, .isigma=isigma, .d = d,
.emean = emean, .esigma=esigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
eta2theta(eta[,1], .lmean, earg = .emean)
@@ -566,7 +577,7 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
misc$earg = list("mean"= .emean, "sigma"= .esigma )
misc$expected = TRUE
misc$d = .d
- }), list( .lmean = lmean, .lsigma=lsigma, .d=d,
+ }), list( .lmean = lmean, .lsigma=lsigma, .d = d,
.emean = emean, .esigma=esigma ))),
loglikelihood=eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -574,10 +585,11 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dtikuv(x=y, d= .d, mean = mymu, sigma=sigma, log = TRUE))
+ sum(w * dtikuv(x=y, d = .d , mean = mymu,
+ sigma = sigma, log = TRUE))
}
- }, list( .lmean = lmean, .lsigma=lsigma, .d=d,
- .emean = emean, .esigma=esigma ))),
+ }, list( .lmean = lmean, .lsigma = lsigma, .d = d,
+ .emean = emean, .esigma = esigma ))),
vfamily=c("tikuv"),
deriv = eval(substitute(expression({
mymu = eta2theta(eta[,1], .lmean, earg = .emean)
@@ -591,7 +603,7 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
c(w) * cbind(dl.dmu * dmu.deta,
dl.dsigma * dsigma.deta)
- }), list( .lmean = lmean, .lsigma=lsigma, .d=d,
+ }), list( .lmean = lmean, .lsigma=lsigma, .d = d,
.emean = emean, .esigma=esigma ))),
weight = eval(substitute(expression({
ayy = 1 / (2*hh)
@@ -610,7 +622,7 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno=1.0e-6) {
dfnorm = function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
- if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
+ if (!is.Numeric(a1, positive = TRUE) || !is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -622,20 +634,21 @@ dfnorm = function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
}
pfnorm = function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
- if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
+ if (!is.Numeric(a1, positive = TRUE) || !is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
L = max(length(q), length(mean), length(sd))
q = rep(q, len = L); mean = rep(mean, len = L); sd = rep(sd, len = L);
ifelse(q < 0, 0,
- pnorm(q = q/(a1*sd) - mean/sd) - pnorm(q=-q/(a2*sd) - mean/sd))
+ pnorm(q = q/(a1*sd) - mean/sd) -
+ pnorm(q = -q/(a2*sd) - mean/sd))
}
qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
- if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
+ if (!is.Numeric(a1, positive = TRUE) || !is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -648,7 +661,8 @@ qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
pfnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
for(i in 1:L) {
mytheta = mean[i]/sd[i]
- EY = sd[i] * ((a1[i]+a2[i]) * (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
+ EY = sd[i] * ((a1[i]+a2[i]) *
+ (mytheta * pnorm(mytheta) + dnorm(mytheta)) -
a2[i] * mytheta)
Upper = 2 * EY
while (pfnorm(q = Upper, mean = mean[i], sd = sd[i],
@@ -662,9 +676,9 @@ qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
}
rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
- if (!is.Numeric(n, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(a1, posit = TRUE) || !is.Numeric(a2, posit = TRUE))
+ if (!is.Numeric(a1, positive = TRUE) || !is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must have positive values only")
@@ -681,12 +695,12 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
a1 = 1, a2 = 1,
nsimEIM = 500, imethod = 1, zero = NULL)
{
- if (!is.Numeric(a1, posit = TRUE, allow = 1) ||
- !is.Numeric(a2, posit = TRUE, allow = 1))
+ if (!is.Numeric(a1, positive = TRUE, allowable.length = 1) ||
+ !is.Numeric(a2, positive = TRUE, allowable.length = 1))
stop("bad input for arguments 'a1' and 'a2'")
if (any(a1 <= 0 | a2 <= 0))
stop("arguments 'a1' and 'a2' must each be a positive value")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -694,17 +708,17 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
lmean = as.character(substitute(lmean))
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(emean)) emean = list()
if (!is.list(esd)) esd = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 10)
stop("argument 'nsimEIM' should be an integer greater than 10")
if (length(imean) && !is.Numeric(imean))
stop("bad input for 'imean'")
- if (length(isd) && !is.Numeric(isd, posit = TRUE))
+ if (length(isd) && !is.Numeric(isd, positive = TRUE))
stop("bad input for 'isd'")
new("vglmff",
@@ -852,12 +866,12 @@ lqnorm = function(qpower = 2, link = "identity", earg = list(),
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) eerg = list()
- if (!is.Numeric(qpower, allow = 1) || qpower <= 1)
+ if (!is.Numeric(qpower, allowable.length = 1) || qpower <= 1)
stop("bad input for argument 'qpower'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
new("vglmff",
@@ -946,7 +960,7 @@ dtobit = function(x, mean = 0, sd = 1,
ind3 <- x == Lower
ans[ind3] = if (log.arg) {
log(exp(ans[ind3]) +
- pnorm(q = Lower[ind3], me = mean[ind3], sd = sd[ind3]))
+ pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3]))
} else {
ans[ind3] +
pnorm(q = Lower[ind3], mean = mean[ind3], sd = sd[ind3])
@@ -1025,7 +1039,7 @@ rtobit = function(n, mean = 0, sd = 1,
Lower = 0, Upper = Inf) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
L = max(use.n, length(mean), length(sd), length(Lower),
length(Upper))
@@ -1072,7 +1086,8 @@ tobit.control <- function(save.weight = TRUE, ...)
if (mode(lsd) != "character" && mode(lsd) != "name")
lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allow = 1, integer = TRUE, posi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
if ( # length(Lower) != 1 || length(Upper) != 1 ||
@@ -1081,9 +1096,9 @@ tobit.control <- function(save.weight = TRUE, ...)
stop("Lower and Upper must ",
"be numeric with Lower < Upper")
if (length(zero) &&
- !is.Numeric(zero, integer = TRUE))
+ !is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) ||
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
nsimEIM <= 10)
stop("argument 'nsimEIM' should be an integer greater than 10")
@@ -1269,9 +1284,9 @@ tobit.control <- function(save.weight = TRUE, ...)
ell0 = dnorm( y[cen0], mean = mum[cen0], sd = sdm[cen0],
log = TRUE)
ellL = pnorm(Lowmat[cenL], mean = mum[cenL], sd = sdm[cenL],
- log = TRUE, lower.tail = TRUE)
+ log.p = TRUE, lower.tail = TRUE)
ellU = pnorm(Uppmat[cenU], mean = mum[cenU], sd = sdm[cenU],
- log = TRUE, lower.tail = FALSE)
+ log.p = TRUE, lower.tail = FALSE)
wmat = matrix(w, nrow = nrow(eta), ncol = ncoly)
if (residuals) {
@@ -1453,45 +1468,68 @@ tobit.control <- function(save.weight = TRUE, ...)
- normal1 <- function(lmean = "identity", lsd = "loge",
- emean = list(), esd = list(),
+ normal1 <- function(lmean = "identity", lsd = "loge", lvar = "loge",
+ emean = list(), esd = list(), evar = list(),
+ var.arg = FALSE,
imethod = 1,
+ isd = NULL,
+ parallel = FALSE,
+ intercept.apply = FALSE,
zero = -2)
{
- lsdev <- lsd
- esdev <- esd
if (mode(lmean) != "character" && mode(lmean) != "name")
lmean <- as.character(substitute(lmean))
- if (mode(lsdev) != "character" && mode(lsdev) != "name")
- lsdev <- as.character(substitute(lsdev))
+ if (mode(lsd) != "character" && mode(lsd) != "name")
+ lsd <- as.character(substitute(lsd))
if (length(zero) &&
- !is.Numeric(zero, integer = TRUE))
+ !is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(emean)) emean <- list()
- if (!is.list(esdev)) esdev <- list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.list(esd)) esd <- list()
+ if (!is.list(evar)) evar <- list()
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
+ if (!is.logical(var.arg) || length(var.arg) != 1)
+ stop("argument 'var.arg' must be a single logical")
+ if (!is.logical(intercept.apply) || length(intercept.apply) != 1)
+ stop("argument 'intercept.apply' must be a single logical")
new("vglmff",
blurb = c("Univariate normal distribution\n\n",
"Links: ",
namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
- namesof("sd", lsdev, earg = esdev, tag = TRUE), "\n",
- "Variance: sd^2"),
+ if (var.arg)
+ namesof("var", lvar, earg = evar, tag = TRUE) else
+ namesof("sd" , lsd, earg = esd, tag = TRUE),
+ "\n",
+ if (var.arg) "Variance: var" else "Variance: sd^2"),
+
+
+
constraints = eval(substitute(expression({
+
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ intercept.apply = .intercept.apply )
+
dotzero <- .zero
Musual <- 2
eval(negzero.expression)
- }), list( .zero = zero ))),
- infos = eval(substitute(function(...) {
- list(Musual = 2,
- zero = .zero)
- }, list( .zero = zero ))),
+ }), list( .zero = zero,
+ .parallel = parallel, .intercept.apply = intercept.apply ))),
+
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
orig.y <- y
y <- cbind(y)
@@ -1505,6 +1543,7 @@ tobit.control <- function(save.weight = TRUE, ...)
+
if (length(attr(orig.y, "Prior.Weights"))) {
if (any(c(w) != 1))
warning("replacing the 'weights' argument by the 'Prior.Weights'",
@@ -1526,12 +1565,15 @@ tobit.control <- function(save.weight = TRUE, ...)
}
-
- mynames1 <- paste("mean", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("sd", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- paste("mean",
+ if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste(if ( .var.arg ) "var" else "sd",
+ if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
c(namesof(mynames1, .lmean, earg = .emean, tag = FALSE),
- namesof(mynames2, .lsdev, earg = .esdev, tag = FALSE))
+ if ( .var.arg )
+ namesof(mynames2, .lvar , earg = .evar , tag = FALSE) else
+ namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
extra$predictors.names <- predictors.names
@@ -1545,10 +1587,11 @@ tobit.control <- function(save.weight = TRUE, ...)
if( .imethod == 1) median(y[, jay]) else
if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
mean(jfit$fitted)
+
sdev.init[, jay] <-
- if( .imethod == 1)
- sqrt( sum(w * (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) ) else
- if( .imethod == 2) {
+ if( .imethod == 1) {
+ sqrt( sum(w * (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ } else if( .imethod == 2) {
if (jfit$df.resid > 0)
sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
@@ -1561,23 +1604,37 @@ tobit.control <- function(save.weight = TRUE, ...)
sdev.init[, jay] <- 1.01
}
- etastart <- cbind(theta2eta(mean.init, .lmean, earg = .emean),
- theta2eta(sdev.init, .lsdev, earg = .esdev))
+
+
+ if (length( .isd )) {
+ sdev.init <- matrix( .isd , n, ncoly, byrow = TRUE)
+ }
+
+
+ etastart <- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
+ if ( .var.arg )
+ theta2eta(sdev.init^2, .lvar , earg = .evar ) else
+ theta2eta(sdev.init , .lsd , earg = .esd ))
etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
colnames(etastart) <- predictors.names
}
- }), list( .lmean = lmean, .lsdev = lsdev,
- .emean = emean, .esdev = esdev,
- .imethod = imethod ))),
+ }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
+ .emean = emean, .esd = esd, .evar = evar,
+ .isd = isd,
+ .var.arg = var.arg, .imethod = imethod ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- extra$Musual
ncoly <- extra$ncoly
- eta2theta(eta[, 2*(1:ncoly) - 1], .lmean, earg = .emean)
- }, list( .lmean = lmean, .emean = emean, .esdev = esdev ))),
+ eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ }, list( .lmean = lmean,
+ .emean = emean, .esd = esd , .evar = evar ))),
+
last = eval(substitute(expression({
Musual <- extra$Musual
- misc$link <- c(rep( .lmean, length = ncoly),
- rep( .lsdev, length = ncoly))
+ misc$link <- c(rep( .lmean , length = ncoly),
+ rep( .lsd , length = ncoly))
misc$link <- misc$link[interleave.VGAM(Musual * ncoly, M = Musual)]
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
@@ -1588,51 +1645,88 @@ tobit.control <- function(save.weight = TRUE, ...)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
misc$earg[[Musual*ii-1]] <- .emean
- misc$earg[[Musual*ii ]] <- .esdev
+ misc$earg[[Musual*ii ]] <- if ( .var.arg) .evar else .esd
}
names(misc$earg) <- temp.names
+ misc$var.arg <- .var.arg
misc$Musual <- Musual
misc$expected <- TRUE
misc$imethod <- .imethod
- }), list( .lmean = lmean, .lsdev = lsdev,
- .emean = emean, .esdev = esdev,
- .imethod = imethod ))),
+ }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
+ .emean = emean, .esd = esd, .evar = evar,
+ .var.arg = var.arg, .imethod = imethod ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
ncoly <- extra$ncoly
- sdev <- eta2theta(eta[, 2*(1:ncoly) ], .lsdev, earg = .esdev)
+ Musual <- extra$Musual
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ }
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dnorm(y, m = mu, sd = sdev, log = TRUE))
}
- }, list( .lsdev = lsdev,
- .esdev = esdev ))),
+ }, list( .lsd = lsd, .lvar = lvar,
+ .esd = esd, .evar = evar,
+ .var.arg = var.arg ))),
vfamily = c("normal1"),
deriv = eval(substitute(expression({
ncoly <- extra$ncoly
+ Musual <- extra$Musual
+
+ mymu <- eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ }
- mymu <- eta2theta(eta[, 2*(1:ncoly) - 1], .lmean, earg = .emean)
- sdev <- eta2theta(eta[, 2*(1:ncoly) ], .lsdev, earg = .esdev)
dl.dmu <- (y - mymu) / sdev^2
- dl.dsd <- -1 / sdev + (y - mymu)^2 / sdev^3
- dmu.deta <- dtheta.deta(mymu, .lmean, earg = .emean)
- dsd.deta <- dtheta.deta(sdev, .lsdev, earg = .esdev)
+ if ( .var.arg ) {
+ dl.dva <- -0.5 / Varm + 0.5 * (y - mymu)^2 / sdev^4
+ } else {
+ dl.dsd <- -1.0 / sdev + (y - mymu)^2 / sdev^3
+ }
+
+ dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
+ if ( .var.arg ) {
+ dva.deta <- dtheta.deta(Varm, .lvar , earg = .evar )
+ } else {
+ dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
+ }
ans <- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsd * dsd.deta)
+ if ( .var.arg ) dl.dva * dva.deta else dl.dsd * dsd.deta)
ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
- }), list( .lmean = lmean, .lsdev = lsdev,
- .emean = emean, .esdev = esdev ))),
- weight = expression({
- wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-column too
- ed2l.dmu2 <- -1 / sdev^2
+ }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
+ .emean = emean, .esd = esd, .evar = evar,
+ .var.arg = var.arg ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-column too
+
+ ed2l.dmu2 <- -1 / sdev^2
+ if ( .var.arg ) {
+ ed2l.dva2 <- -0.5 / Varm^2
+ } else {
ed2l.dsd2 <- -2 / sdev^2
- wz[, 2*(1:ncoly) - 1] <- -ed2l.dmu2 * dmu.deta^2
- wz[, 2*(1:ncoly) ] <- -ed2l.dsd2 * dsd.deta^2
- c(w) * wz
- }))
+ }
+
+ wz[, Musual*(1:ncoly) - 1] <- -ed2l.dmu2 * dmu.deta^2
+ if ( .var.arg ) {
+ wz[, Musual*(1:ncoly) ] <- -ed2l.dva2 * dva.deta^2
+ } else {
+ wz[, Musual*(1:ncoly) ] <- -ed2l.dsd2 * dsd.deta^2
+ }
+
+ c(w) * wz
+ }), list( .var.arg = var.arg ))))
}
@@ -1649,7 +1743,7 @@ tobit.control <- function(save.weight = TRUE, ...)
lmeanlog = as.character(substitute(lmeanlog))
if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
lsdlog = as.character(substitute(lsdlog))
- if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ if (length(zero) && (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
zero > 2))
stop("bad input for argument argument 'zero'")
if (!is.list(emeanlog)) emeanlog = list()
@@ -1750,7 +1844,7 @@ tobit.control <- function(save.weight = TRUE, ...)
lmeanlog = as.character(substitute(lmeanlog))
if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
lsdlog = as.character(substitute(lsdlog))
- if (length(zero) && (!is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ if (length(zero) && (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
zero > 3))
stop("bad input for argument argument 'zero'")
if (!is.list(emeanlog)) emeanlog = list()
@@ -1873,10 +1967,11 @@ dsnorm = function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
zedd = (x - location) / scale
- loglik = log(2) + dnorm(zedd, log = TRUE) + pnorm(shape * zedd, log.p = TRUE) -
+ loglik = log(2) + dnorm(zedd, log = TRUE) +
+ pnorm(shape * zedd, log.p = TRUE) -
log(scale)
if (log.arg) {
loglik
@@ -1888,11 +1983,14 @@ dsnorm = function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
rsnorm = function(n, location = 0, scale = 1, shape=0) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(shape)) stop("bad input for argument 'shape'")
+ if (!is.Numeric(n, positive = TRUE,
+ integer.valued = TRUE, allowable.length = 1))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+ if (!is.Numeric(shape))
+ stop("bad input for argument 'shape'")
+
rho = shape / sqrt(1 + shape^2)
u0 = rnorm(n)
v = rnorm(n)
@@ -1910,7 +2008,7 @@ rsnorm = function(n, location = 0, scale = 1, shape=0) {
lshape = as.character(substitute(lshape))
if (!is.list(earg)) earg = list()
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10))
+ (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 10))
stop("argument 'nsimEIM' should be an integer greater than 10")
new("vglmff",
diff --git a/R/family.others.R b/R/family.others.R
index e16df94..f739b1a 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,4 +1,4 @@
-# These functions are Copyright (C) 1998-2011 T. W. Yee All rights reserved.
+# These functions are Copyright (C) 1998-2012 T. W. Yee All rights reserved.
# family.others.R
@@ -111,11 +111,11 @@ rexppois <- function(n, lambda, betave = 1) {
if (mode(lbetave) != "character" && mode(lbetave) != "name")
lbetave = as.character(substitute(lbetave))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(ilambda) && !is.Numeric(ilambda, posit = TRUE))
+ if (length(ilambda) && !is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
- if (length(ibetave) && !is.Numeric(ibetave, posit = TRUE))
+ if (length(ibetave) && !is.Numeric(ibetave, positive = TRUE))
stop("bad input for argument 'ibetave'")
ilambda[abs(ilambda - 1) < 0.01] = 1.1
@@ -359,14 +359,14 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(ishape) && !is.Numeric(ishape, posit = TRUE))
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
if (!is.list(escale))
@@ -407,7 +407,7 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
ans
}
# Note: problems occur if scale values too close to zero:
- scale.grid = seq(0.2 * stats:::sd(y), 5 * stats:::sd(y), len = 29)
+ scale.grid = seq(0.2 * stats::sd(y), 5 * stats::sd(y), len = 29)
scale.init = if (length( .iscale )) .iscale else
getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
y = y, x = x, w = w)
@@ -651,14 +651,14 @@ expgeometric.control <- function(save.weight = TRUE, ...)
lscale = as.character(substitute(lscale))
if (length(ishape))
- if (!is.Numeric(ishape, posit = TRUE) || any(ishape >= 1))
+ if (!is.Numeric(ishape, positive = TRUE) || any(ishape >= 1))
stop("bad input for argument 'ishape'")
if (length(iscale))
- if (!is.Numeric(iscale, posit = TRUE))
+ if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(escale))
@@ -666,7 +666,7 @@ expgeometric.control <- function(save.weight = TRUE, ...)
if (!is.list(eshape))
eshape = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
@@ -696,17 +696,17 @@ expgeometric.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- scale.init = if (is.Numeric( .iscale , posit = TRUE)) {
+ scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
rep( .iscale , len = n)
} else {
# The scale parameter should be
# the standard deviation of y.
- stats:::sd(y) # The papers scale parameter beta
+ stats::sd(y) # The papers scale parameter beta
}
#print("head(scale.init)")
#print( head(scale.init) )
- shape.init = if (is.Numeric( .ishape , posit = TRUE)) {
+ shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
rep( .ishape , len = n)
} else {
# Use the formula for the median:
@@ -802,7 +802,7 @@ expgeometric.control <- function(save.weight = TRUE, ...)
# if (FALSE) {
# ed2l.dscale2 = (3 * shape - 2 * (shape - (1 - shape) *
-# (gsl:::dilog(shape,2)$val))) / (3 * Scale^2 * shape)
+# (gsl::dilog(shape,2)$val))) / (3 * Scale^2 * shape)
# ed2l.dshape2 = (1 - shape)^(-2) / 3
@@ -975,14 +975,14 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
lscale = as.character(substitute(lscale))
if (length(ishape))
- if (!is.Numeric(ishape, posit = TRUE) || any(ishape >= 1))
+ if (!is.Numeric(ishape, positive = TRUE) || any(ishape >= 1))
stop("bad input for argument 'ishape'")
if (length(iscale))
- if (!is.Numeric(iscale, posit = TRUE))
+ if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(escale))
@@ -990,7 +990,7 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
if (!is.list(eshape))
eshape = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
@@ -1017,15 +1017,15 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- scale.init = if (is.Numeric( .iscale , posit = TRUE)) {
+ scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
rep( .iscale , len = n)
} else {
# The scale parameter should be
# the standard deviation of y.
- stats:::sd(y)
+ stats::sd(y)
}
- shape.init = if (is.Numeric( .ishape , posit = TRUE)) {
+ shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
rep( .ishape , len = n)
} else {
# Use the formula for the median (Tahmasabi pg. 3891):
@@ -1245,10 +1245,11 @@ if (FALSE)
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(imethod, allow = 1, integer = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1, 2 or 3")
@@ -1310,7 +1311,7 @@ if (FALSE)
scale.init = if(length( .iscale )) {
rep( .iscale , len = n )
} else {
- rep(stats:::sd(y) / sqrt(gamma(1 + 2/shape.init) -
+ rep(stats::sd(y) / sqrt(gamma(1 + 2/shape.init) -
gamma(1 + 1/shape.init)^2) , len = n)
}
@@ -1583,8 +1584,8 @@ ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
zedd <- (q - location) / scale
- s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar) #/ scale
- s2 <- skewpar + (1 - skewpar) * pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
+ s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar) #/ scale
+ s2 <- skewpar + (1 - skewpar) * pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
ans <- rep(0.0, length(zedd))
ans[zedd <= 0] <- s1[zedd <= 0]
@@ -1649,18 +1650,18 @@ tpnff <- function(llocation = "identity", lscale = "loge",
# parameters of the TPN distribution, I am not worry about the skew
# parameter p.
# Note : pp = Skewparameter
- if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(method.init, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
method.init > 4)
stop("'imethod' must be 1 or 2 or 3 or 4")
- if (!is.Numeric(pp, allow = 1, posit = TRUE))
+ if (!is.Numeric(pp, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'pp'")
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -1787,23 +1788,26 @@ tpnff <- function(llocation = "identity", lscale = "loge",
tpnff3 <- function(llocation = "identity", elocation = list(),
lscale = "loge", escale = list(),
lskewpar = "identity", eskewpar = list(),
- method.init = 1, zero = 2)
+ method.init = 1, zero = 2)
{
- if (!is.Numeric(method.init, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(method.init, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
method.init > 4)
- stop("'imethod' must be 1 or 2 or 3 or 4")
+ stop("'imethod' must be 1 or 2 or 3 or 4")
- # if (!is.Numeric(pp, allow = 1, posit = TRUE))
+ # if (!is.Numeric(pp, allowable.length = 1, positive = TRUE))
# stop("bad input for argument 'pp'")
if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
+ llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lscale = as.character(substitute(lscale))
if (mode(lskewpar) != "character" && mode(lskewpar) != "name")
- lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
+ lscale = as.character(substitute(lscale))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
if (!is.list(eskewpar)) eskewpar = list()
@@ -1811,9 +1815,9 @@ tpnff3 <- function(llocation = "identity", elocation = list(),
new("vglmff",
blurb = c("Two-piece normal distribution \n\n",
"Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("skewpar", lscale, earg = epp), "\n\n",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("skewpar", lscale, earg = eskewpar), "\n\n",
"Mean: "),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
diff --git a/R/family.positive.R b/R/family.positive.R
index 317d27a..987dd13 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -18,7 +18,8 @@ rhuggins91 =
use.n <- if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
orig.n <- use.n
use.n <- 1.50 * use.n + 100 # Bigger due to rejections
@@ -389,83 +390,90 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
dposnegbin = function(x, size, prob = NULL, munb = NULL, log = FALSE) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
- rm(log)
-
- L = max(length(x), length(prob), length(size))
- x = rep(x, len = L); prob = rep(prob, len = L); size = rep(size, len = L);
-
- ans = dnbinom(x = x, size = size, prob = prob, log=log.arg)
- index0 = x == 0
+ if (length(munb)) {
+ if (length(prob))
+ stop("'prob' and 'munb' both specified")
+ prob <- size/(size + munb)
+ }
+ if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ rm(log)
- if (log.arg) {
- ans[ index0] = log(0.0)
- ans[!index0] = ans[!index0] - log1p(-dnbinom(x = 0 * x[!index0],
- size = size[!index0], prob = prob[!index0]))
- } else {
- ans[ index0] = 0.0
- ans[!index0] = ans[!index0] / pnbinom(q=0 * x[!index0],
- size = size[!index0], prob = prob[!index0], lower.tail=FALSE)
- }
- ans
+ L = max(length(x), length(prob), length(size))
+ x = rep(x, len = L); prob = rep(prob, len = L);
+ size = rep(size, len = L);
+
+ ans = dnbinom(x = x, size = size, prob = prob, log = log.arg)
+ index0 = (x == 0)
+
+ if (log.arg) {
+ ans[ index0] = log(0.0)
+ ans[!index0] = ans[!index0] - log1p(-dnbinom(x = 0 * x[!index0],
+ size = size[!index0], prob = prob[!index0]))
+ } else {
+ ans[ index0] = 0.0
+ ans[!index0] = ans[!index0] / pnbinom(q = 0 * x[!index0],
+ size = size[!index0], prob = prob[!index0],
+ lower.tail = FALSE)
+ }
+ ans
}
pposnegbin = function(q, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- L = max(length(q), length(prob), length(size))
- q = rep(q, len = L); prob = rep(prob, len = L); size = rep(size, len = L);
+ if (length(munb)) {
+ if (length(prob))
+ stop("'prob' and 'munb' both specified")
+ prob <- size/(size + munb)
+ }
+ L = max(length(q), length(prob), length(size))
+ q = rep(q, len = L); prob = rep(prob, len = L); size = rep(size, len = L)
- ifelse(q < 1, 0, (pnbinom(q, size = size, prob = prob) -
- dnbinom(q*0, size = size, prob = prob)) / pnbinom(q * 0,
- size = size, prob = prob, lower.tail = FALSE))
+ ifelse(q < 1, 0, (pnbinom(q, size = size, prob = prob) -
+ dnbinom(q*0, size = size, prob = prob))
+ / pnbinom(q*0, size = size, prob = prob,
+ lower.tail = FALSE))
}
qposnegbin = function(p, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size / (size + munb)
- }
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- qnbinom(p * pnbinom(q = p*0, size = size, prob = prob,
- lower.tail = FALSE) +
- dnbinom(x = p*0, size = size, prob = prob),
- size = size, prob = prob)
+ if (length(munb)) {
+ if (length(prob))
+ stop("'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
+ if (!is.Numeric(p, positive = TRUE) ||
+ any(p >= 1))
+ stop("bad input for argument 'p'")
+ qnbinom(p * pnbinom(q = p*0, size = size, prob = prob,
+ lower.tail = FALSE) +
+ dnbinom(x = p*0, size = size, prob = prob),
+ size = size, prob = prob)
}
rposnegbin = function(n, size, prob = NULL, munb = NULL) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
-
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- ans = rnbinom(use.n, size = size, prob = prob)
+
+
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (length(munb)) {
+ if (length(prob))
+ stop("'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
+ ans = rnbinom(use.n, size = size, prob = prob)
+ index = (ans == 0)
+ size = rep(size, len = use.n)
+ prob = rep(prob, len = use.n)
+ while(any(index, na.rm = TRUE)) {
+ more = rnbinom(n = sum(index), size = size[index],
+ prob = prob[index])
+ ans[index] = more
index = (ans == 0)
- size = rep(size, len=length(ans))
- prob = rep(prob, len=length(ans))
- while(any(index)) {
- more = rnbinom(n=sum(index), size = size[index], prob = prob[index])
- ans[index] = more
- index = (ans == 0)
- }
- ans
+ }
+ ans
}
@@ -474,249 +482,364 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
+
+posnegbinomial.control <- function(save.weight = TRUE, ...)
+{
+ list(save.weight = save.weight)
+}
+
+
+
posnegbinomial = function(lmunb = "loge", lsize = "loge",
emunb = list(), esize = list(),
- isize = NULL, zero = -2, cutoff = 0.995,
+ isize = NULL, zero = -2,
+ nsimEIM = 250,
shrinkage.init = 0.95, imethod = 1)
{
- if (!is.Numeric(cutoff, allow = 1) ||
- cutoff < 0.8 || cutoff >= 1)
- stop("range error in the argument 'cutoff'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (length(isize) && !is.Numeric(isize, posit = TRUE))
- stop("bad input for argument 'isize'")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
-
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
-
- new("vglmff",
- blurb = c("Positive-negative binomial distribution\n\n",
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(isize) && !is.Numeric(isize, positive = TRUE))
+ stop("bad input for argument 'isize'")
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ if (mode(lmunb) != "character" && mode(lmunb) != "name")
+ lmunb = as.character(substitute(lmunb))
+ if (mode(lsize) != "character" && mode(lsize) != "name")
+ lsize = as.character(substitute(lsize))
+
+ if (!is.list(emunb)) emunb = list()
+ if (!is.list(esize)) esize = list()
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1, positive = TRUE, integer.valued = TRUE))
+ stop("argument 'nsimEIM' must be a positive integer")
+ if (nsimEIM <= 30)
+ warning("argument 'nsimEIM' should be greater than 30, say")
+
+
+ new("vglmff",
+ blurb = c("Positive-negative binomial distribution\n\n",
"Links: ",
namesof("munb", lmunb, earg = emunb ), ", ",
namesof("size", lsize, earg = esize ), "\n",
"Mean: munb / (1 - (size / (size + munb))^size)"),
- constraints = eval(substitute(expression({
-
- dotzero = .zero
- Musual = 2
- eval(negzero.expression)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (any(y==0)) stop("there are zero values in the response")
- y = as.matrix(y)
- M = 2 * ncol(y)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- predictors.names = c(
- namesof(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
- .lmunb, earg = .emunb, tag = FALSE),
- namesof(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
- .lsize, earg = .esize, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
- if (!length(etastart)) {
- mu.init = y
- for(iii in 1:ncol(y)) {
- use.this = if ( .imethod == 2) {
- weighted.mean(y[,iii], w)
- } else {
- median(y[,iii])
- }
- mu.init[,iii] = (1 - .sinit) * y[,iii] + .sinit * use.this
- }
+ constraints = eval(substitute(expression({
- if ( is.Numeric( .isize )) {
- kmat0 = matrix( .isize, nr = n, nc = NOS, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun =
- function(kmat, y, x, w, extraargs) {
- munb = extraargs
- sum(w * dposnegbin(x = y, size = kmat, munb = munb,
- log = TRUE))
- }
- k.grid = 2^((-6):6)
- kmat0 = matrix(0, nr=n, nc=NOS)
- for(spp. in 1:NOS) {
- kmat0[,spp.] = getMaxMin(k.grid,
- objfun=posnegbinomial.Loglikfun,
- y=y[,spp.], x=x, w=w,
- extraargs= mu.init[,spp.])
- }
- }
- p00 = (kmat0 / (kmat0 + mu.init))^kmat0
- etastart =
- cbind(theta2eta(mu.init*(1-p00), .lmunb, earg = .emunb ),
- theta2eta(kmat0, .lsize, earg = .esize ))
- etastart = etastart[, interleave.VGAM(M, M = 2), drop = FALSE]
- }
- }), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
- .emunb = emunb, .esize = esize,
- .sinit = shrinkage.init,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS = ncol(eta) / 2
- munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- p0 = (kmat / (kmat + munb))^kmat
- munb / (1 - p0)
- }, list( .lsize = lsize, .lmunb = lmunb,
- .esize = esize, .emunb = emunb ))),
- last = eval(substitute(expression({
- temp0303 = c(rep( .lmunb, length = NOS),
- rep( .lsize, length = NOS))
- names(temp0303) =
- c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
- if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = 2)]
- misc$link = temp0303 # Already named
- misc$earg = vector("list", 2*NOS)
- names(misc$earg) = names(misc$link)
- for(ii in 1:NOS) {
- misc$earg[[2*ii-1]] = .emunb
- misc$earg[[2*ii ]] = .esize
- }
- misc$cutoff = .cutoff
- misc$imethod = .imethod
- }), list( .lmunb = lmunb, .lsize = lsize,
- .emunb = emunb, .esize = esize,
- .cutoff = cutoff, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = ncol(eta) / 2
- munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE))
+ dotzero = .zero
+ Musual = 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual = 2
+
+ if (any(y == 0))
+ stop("there are zero values in the response")
+ y = as.matrix(y)
+ M = 2 * ncol(y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+
+ predictors.names = c(
+ namesof(if (NOS == 1) "munb" else
+ paste("munb", 1:NOS, sep = ""),
+ .lmunb, earg = .emunb, tag = FALSE),
+ namesof(if (NOS == 1) "size" else
+ paste("size", 1:NOS, sep = ""),
+ .lsize, earg = .esize, tag = FALSE))
+ predictors.names = predictors.names[interleave.VGAM(M, M = Musual)]
+
+ if (!length(etastart)) {
+ mu.init = y
+ for(iii in 1:ncol(y)) {
+ use.this = if ( .imethod == 1) {
+ weighted.mean(y[, iii], w)
+ } else {
+ median(y[,iii])
}
+ mu.init[, iii] = (1 - .sinit) * y[, iii] + .sinit * use.this
+ }
+
+ if ( is.Numeric( .isize )) {
+ kmat0 = matrix( .isize , nrow = n, ncol = NOS, byrow = TRUE)
+ } else {
+ posnegbinomial.Loglikfun =
+ function(kmat, y, x, w, extraargs) {
+ munb = extraargs
+ sum(w * dposnegbin(x = y, size = kmat, munb = munb,
+ log = TRUE))
+ }
+ k.grid = 2^((-6):6)
+ kmat0 = matrix(0, nrow = n, ncol = NOS)
+ for(spp. in 1:NOS) {
+ kmat0[, spp.] = getMaxMin(k.grid,
+ objfun = posnegbinomial.Loglikfun,
+ y = y[, spp.], x = x, w = w,
+ extraargs = mu.init[, spp.])
+ }
+ }
+ p00 = (kmat0 / (kmat0 + mu.init))^kmat0
+ etastart =
+ cbind(
+ theta2eta(mu.init * (1 - p00), .lmunb, earg = .emunb ),
+ theta2eta(kmat0, .lsize, earg = .esize ))
+ etastart = etastart[,interleave.VGAM(M, M = Musual), drop = FALSE]
+ }
+ }), list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
+ .emunb = emunb, .esize = esize,
+ .sinit = shrinkage.init,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual = 2
+ NOS = ncol(eta) / Musual
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS), drop = FALSE],
+ .lsize, earg = .esize )
+ po0 = (kmat / (kmat + munb))^kmat
+ munb / (1 - po0)
+ }, list( .lsize = lsize, .lmunb = lmunb,
+ .esize = esize, .emunb = emunb ))),
+ last = eval(substitute(expression({
+ temp0303 = c(rep( .lmunb , length = NOS),
+ rep( .lsize , length = NOS))
+ names(temp0303) =
+ c(if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = ""),
+ if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
+ temp0303 = temp0303[interleave.VGAM(M, M = Musual)]
+ misc$link = temp0303 # Already named
+
+ misc$earg = vector("list", Musual*NOS)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] = .emunb
+ misc$earg[[Musual*ii ]] = .esize
+ }
+
+ misc$nsimEIM = .nsimEIM
+ misc$imethod = .imethod
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize,
+ .nsimEIM = nsimEIM, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Musual = 2
+ NOS = ncol(eta) / Musual
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb, earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize, earg = .esize )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dposnegbin(x = y, size = kmat, munb = munb, log = TRUE))
+ }
+ }, list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize ))),
+
+ vfamily = c("posnegbinomial"),
+ deriv = eval(substitute(expression({
+ Musual = 2
+ NOS = extra$NOS
+
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+
+ dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
+ dsize.deta = dtheta.deta(kmat, .lsize, earg = .esize )
+ NOS = ncol(eta) / Musual
+
+
+ tempk = kmat / (kmat + munb)
+ tempm = munb / (kmat + munb)
+ prob0 = tempk^kmat
+ oneminusf0 = 1 - prob0
+ df0.dmunb = -tempk * prob0
+ df0.dkmat = prob0 * (tempm + log(tempk))
+ df02.dmunb2 = prob0 * tempk / (kmat + munb) - tempk * df0.dmunb
+ df02.dkmat2 = (prob0 / kmat) * tempm^2
+ df02.dkmat.dmunb = prob0 * (-tempk) * (tempm + log(tempk)) -
+ tempm * prob0 / (kmat + munb)
+
+
+ dl.dmunb = y / munb - (y + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize = digamma(y + kmat) - digamma(kmat) -
+ (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
+
+ myderiv = c(w) * cbind(dl.dmunb * dmunb.deta,
+ dl.dsize * dsize.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .lmunb = lmunb, .lsize = lsize,
+ .emunb = emunb, .esize = esize ))),
+ weight = eval(substitute(expression({
+ run.varcov =
+ wz = matrix(0.0, n, 4*NOS-1)
+
+
+
+
+ if (FALSE) {
+ usualmeanY = munb
+ meanY = usualmeanY / oneminusf0
+ ed2l.dmu2 = meanY / munb^2 -
+ (meanY + kmat) / (munb + kmat)^2 -
+ df02.dmunb2 / oneminusf0 -
+ (df0.dmunb / oneminusf0)^2
+ }
+
+
- }, list( .lmunb = lmunb, .lsize = lsize,
- .emunb = emunb, .esize = esize ))),
- vfamily = c("posnegbinomial"),
- deriv = eval(substitute(expression({
- NOS = extra$NOS
- munb = eta2theta(eta[,2*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,2*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- d3 = deriv3(~ -log(1 - (kmat. / (kmat. + munb. ))^kmat. ),
- c("munb.", "kmat."), hessian = TRUE) # Extra term
- dl0.dthetas = array(NA, c(n, NOS, 2))
- d2l0.dthetas2 = array(NA, c(n, NOS, 3)) # matrix-band format
- for(spp. in 1:NOS) {
- kmat. = kmat[,spp.]
- munb. = munb[,spp.]
- eval.d3 = eval(d3) # Evaluated for one species
- dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[,1]
- dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[,2]
- d2l0.dthetas2[,spp.,1] = attr(eval.d3, "hessian")[,1,1]
- d2l0.dthetas2[,spp.,2] = attr(eval.d3, "hessian")[,2,2]
- d2l0.dthetas2[,spp.,3] = attr(eval.d3, "hessian")[,1,2]
+
+
+ {
+ ind2 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rposnegbin(n = n*NOS, mu = c(munb), size = c(kmat))
+ dim(ysim) = c(n, NOS)
+
+ dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
+ (ysim + kmat) / (munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
+
+ for(kk in 1:NOS) {
+ temp2 = cbind(dl.dmunb[, kk],
+ dl.dsize[, kk]) *
+ cbind(dmunb.deta[, kk],
+ dsize.deta[, kk])
+ small.varcov = temp2[, ind2$row.index] *
+ temp2[, ind2$col.index]
+
+ run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] =
+ run.varcov[, ((kk-1)*Musual+1):(kk*Musual)] +
+ c(small.varcov[, 1:Musual])
+ run.varcov[, M + (kk-1)*Musual + 1] =
+ run.varcov[, M + (kk-1)*Musual + 1] +
+ c(small.varcov[, Musual + 1])
}
+ } # ii
+
+ run.varcov = cbind(run.varcov / .nsimEIM )
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ }
+
+
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
+dposgeom = function(x, prob, log = FALSE) {
+ dgeom(x - 1, prob = prob, log = log)
+}
+
- NOS = ncol(eta) / 2
- dl.dmunb = y/munb - (y+kmat)/(kmat+munb) + dl0.dthetas[,,1]
- dl.dkayy = digamma(y+kmat) - digamma(kmat) -
- (y+kmat) / (munb+kmat) + 1 +
- log(kmat /(kmat+munb)) + dl0.dthetas[,,2]
- dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
- dkayy.deta = dtheta.deta(kmat, .lsize, earg = .esize )
- myderiv = w * cbind(dl.dmunb * dmunb.deta,
- dl.dkayy * dkayy.deta)
- myderiv[, interleave.VGAM(M, M = 2)]
- }), list( .lmunb = lmunb, .lsize = lsize,
- .emunb = emunb, .esize = esize ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, 4*NOS-1) # wz is no longer 'diagonal'
- p0 = (kmat / (kmat + munb))^kmat
- ed2l.dmunb2 = (1/munb -
- (munb + kmat*(1-p0))/(munb+kmat)^2) / (1-p0) -
- d2l0.dthetas2[,,1]
- fred = dotFortran(name="enbin8", ans=double(n*NOS),
- as.double(kmat), as.double(kmat/(munb+kmat)),
- as.double(.cutoff), as.integer(n),
- ok=as.integer(1), as.integer(NOS), sumpdf=double(1),
- macheps = as.double(.Machine$double.eps))
- if (fred$ok != 1)
- stop("error in Fortran subroutine exnbin")
- dim(fred$ans) = c(n, NOS)
- ed2l.dk2 = -fred$ans/(1-p0) - 1/kmat + 1/(kmat+munb) -
- munb * p0 / ((1-p0)*(munb+kmat)^2) - d2l0.dthetas2[,,2]
-
- wz[,2*(1:NOS)-1] = dmunb.deta^2 * ed2l.dmunb2
- wz[,2*(1:NOS) ] = dkayy.deta^2 * ed2l.dk2
- wz[,2*NOS+2*(1:NOS)-1] = -d2l0.dthetas2[,,3] *
- dmunb.deta * dkayy.deta
-
- c(w) * wz
- }), list( .cutoff = cutoff ))))
+pposgeom = function(q, prob) {
+ if (!is.Numeric(prob, positive = TRUE))
+ stop("bad input for argument 'prob'")
+ L = max(length(q), length(prob))
+ if (length(q) != L) q = rep(q, len = L);
+ if (length(prob) != L) prob = rep(prob, len = L);
+ ifelse(q < 1, 0, (pgeom(q, prob) - prob) / (1 - prob))
+}
+
+
+qposgeom = function(p, prob) {
+ if (!is.Numeric(prob, positive = TRUE))
+ stop("bad input for argument 'prob'")
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("bad input for argument 'p'")
+ qgeom(p * (1 - prob) + prob, prob = prob)
+}
+
+
+rposgeom = function(n, prob) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ ans = rgeom(use.n, prob = prob)
+ prob = rep(prob, len = use.n)
+ index = (ans == 0)
+ while(any(index)) {
+ more = rgeom(n = sum(index), prob[index])
+ ans[index] = more
+ index = (ans == 0)
+ }
+ ans
}
+
+
dpospois = function(x, lambda, log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
- rm(log)
-
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- L = max(length(x), length(lambda))
- x = rep(x, len = L); lambda = rep(lambda, len = L);
- ans = if (log.arg) {
- ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
- log1p(-exp(-lambda)))
- } else {
- ifelse(x == 0, 0, -dpois(x, lambda) / expm1(-lambda))
- }
- ans
+ if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ rm(log)
+
+ if (!is.Numeric(lambda, positive = TRUE))
+ stop("bad input for argument 'lambda'")
+ L = max(length(x), length(lambda))
+ x = rep(x, len = L); lambda = rep(lambda, len = L);
+ ans = if (log.arg) {
+ ifelse(x == 0, log(0.0), dpois(x, lambda, log = TRUE) -
+ log1p(-exp(-lambda)))
+ } else {
+ ifelse(x == 0, 0, -dpois(x, lambda) / expm1(-lambda))
+ }
+ ans
}
ppospois = function(q, lambda) {
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- L = max(length(q), length(lambda))
- q = rep(q, len = L); lambda = rep(lambda, len = L);
- ifelse(q < 1, 0, (ppois(q, lambda) - exp(-lambda)) / (-expm1(-lambda)))
+ if (!is.Numeric(lambda, positive = TRUE))
+ stop("bad input for argument 'lambda'")
+ L = max(length(q), length(lambda))
+ q = rep(q, len = L); lambda = rep(lambda, len = L);
+ ifelse(q < 1, 0, (ppois(q, lambda) - exp(-lambda)) / (-expm1(-lambda)))
}
+
qpospois = function(p, lambda) {
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- qpois(p * (-expm1(-lambda)) + exp(-lambda), lambda)
+ if (!is.Numeric(lambda, positive = TRUE))
+ stop("bad input for argument 'lambda'")
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("bad input for argument 'p'")
+ qpois(p * (-expm1(-lambda)) + exp(-lambda), lambda)
}
rpospois = function(n, lambda) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
-
- if (any(lambda == 0))
- stop("no zero values allowed for argument 'lambda'")
- ans = rpois(use.n, lambda)
- lambda = rep(lambda, len=use.n)
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (any(lambda == 0))
+ stop("no zero values allowed for argument 'lambda'")
+ ans = rpois(use.n, lambda)
+ lambda = rep(lambda, len = use.n)
+ index = (ans == 0)
+ while(any(index)) {
+ more = rpois(n = sum(index), lambda[index])
+ ans[index] = more
index = (ans == 0)
- while(any(index)) {
- more = rpois(n=sum(index), lambda[index])
- ans[index] = more
- index = (ans == 0)
- }
- ans
+ }
+ ans
}
@@ -726,21 +849,29 @@ rpospois = function(n, lambda) {
ilambda = NULL, imethod = 1)
{
- if (!missing(link))
+ if (mode(link) != "character" && mode(link) != "name")
link <- as.character(substitute(link))
if (!is.list(earg)) earg <- list()
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
- if (length( ilambda) && !is.Numeric(ilambda, posit = TRUE))
+ if (length( ilambda) && !is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 3) stop("argument 'imethod' must be 1 or 2 or 3")
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
new("vglmff",
blurb = c("Positive-Poisson distribution\n\n",
"Links: ",
namesof("lambda", link, earg = earg, tag = FALSE)),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ link = .link,
+ earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+
initialize = eval(substitute(expression({
y <- as.matrix(y)
@@ -749,21 +880,22 @@ rpospois = function(n, lambda) {
if (any(y != round(y )))
stop("the response must be integer-valued")
- predictors.names <- namesof(
- paste("lambda", if (ncol(y) > 1) 1:ncol(y) else "", sep = ""),
- .link, earg = .earg, tag = FALSE)
+ predictors.names <-
+ namesof(paste("lambda", if (ncol(y) > 1) 1:ncol(y) else "", sep = ""),
+ .link, earg = .earg, tag = FALSE)
if ( .imethod == 1) {
lambda.init <- apply(y, 2, median) + 1/8
lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
} else if ( .imethod == 2) {
- lambda.init <- apply(y, 2, weighted.mean, w=w) + 1/8
+ lambda.init <- apply(y, 2, weighted.mean, w = w) + 1/8
lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
} else {
lambda.init <- -y / expm1(-y)
}
if (length( .ilambda))
lambda.init <- lambda.init * 0 + .ilambda
+
if (!length(etastart))
etastart <- theta2eta(lambda.init, .link, earg = .earg)
}), list( .link = link, .earg = earg,
@@ -774,13 +906,15 @@ rpospois = function(n, lambda) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$expected <- .expected
+
misc$link <- rep( .link, len = M)
names(misc$link) <- if (M == 1) "lambda" else
paste("lambda", 1:M, sep = "")
+
misc$earg <- vector("list", M)
names(misc$earg) <- names(misc$link)
for(ii in 1:M)
- misc$earg[[ii]] <- .earg
+ misc$earg[[ii]] <- .earg
}), list( .link = link, .earg = earg, .expected = expected ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -831,56 +965,56 @@ pposbinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE) {
qposbinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE) {
- if (!is.Numeric(prob, positive = TRUE))
- stop("no zero or non-numeric values allowed for argument 'prob'")
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- qbinom(p = p * (1 - (1-prob)^size) + (1-prob)^size, size = size,
- prob = prob, lower.tail = lower.tail, log.p = log.p)
+ if (!is.Numeric(prob, positive = TRUE))
+ stop("no zero or non-numeric values allowed for argument 'prob'")
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("bad input for argument 'p'")
+ qbinom(p = p * (1 - (1-prob)^size) + (1-prob)^size, size = size,
+ prob = prob, lower.tail = lower.tail, log.p = log.p)
}
rposbinom = function(n, size, prob) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
-
- if (any(prob == 0))
- stop("no zero values allowed for argument 'prob'")
- ans = rbinom(n=use.n, size = size, prob = prob)
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (any(prob == 0))
+ stop("no zero values allowed for argument 'prob'")
+ ans = rbinom(n = use.n, size = size, prob = prob)
+ index = (ans == 0)
+ size = rep(size, len=length(ans))
+ prob = rep(prob, len=length(ans))
+ while(any(index)) {
+ more = rbinom(n = sum(index), size[index], prob = prob[index])
+ ans[index] = more
index = (ans == 0)
- size = rep(size, len=length(ans))
- prob = rep(prob, len=length(ans))
- while(any(index)) {
- more = rbinom(n=sum(index), size[index], prob = prob[index])
- ans[index] = more
- index = (ans == 0)
- }
- ans
+ }
+ ans
}
dposbinom = function(x, size, prob, log = FALSE) {
- log.arg = log
- rm(log)
- L = max(length(x), length(size), length(prob))
- x = rep(x, len = L); size = rep(size, len = L);
- prob = rep(prob, len = L);
-
- answer = NaN * x
- is0 <- (x == 0)
- ok2 <- (prob > 0) & (prob <= 1) &
- (size == round(size)) & (size > 0)
- answer = dbinom(x = x, size = size, prob = prob, log = TRUE) -
- log1p(-dbinom(x = 0*x, size = size, prob = prob))
- answer[!ok2] = NaN
- if (log.arg) {
- answer[is0 & ok2] = log(0.0)
- } else {
- answer = exp(answer)
- answer[is0 & ok2] = 0.0
- }
- answer
+ log.arg = log
+ rm(log)
+ L = max(length(x), length(size), length(prob))
+ x = rep(x, len = L); size = rep(size, len = L);
+ prob = rep(prob, len = L);
+
+ answer = NaN * x
+ is0 <- (x == 0)
+ ok2 <- (prob > 0) & (prob <= 1) &
+ (size == round(size)) & (size > 0)
+ answer = dbinom(x = x, size = size, prob = prob, log = TRUE) -
+ log1p(-dbinom(x = 0*x, size = size, prob = prob))
+ answer[!ok2] = NaN
+ if (log.arg) {
+ answer[is0 & ok2] = log(0.0)
+ } else {
+ answer = exp(answer)
+ answer[is0 & ok2] = 0.0
+ }
+ answer
}
@@ -893,7 +1027,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
mv = FALSE, parallel = FALSE, zero = NULL) {
- if (!missing(link))
+ if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
@@ -943,18 +1077,18 @@ dposbinom = function(x, size, prob, log = FALSE) {
predictors.names = namesof(if (M > 1) dn2 else
"prob", .link, earg = .earg, short = TRUE)
- w = matrix(w, n, ncoly)
- y = y / w # Now sample proportion
+ w = matrix(w, n, ncoly)
+ y = y / w # Now sample proportion
} else {
- predictors.names =
- namesof("prob", .link, earg = .earg , tag = FALSE)
+ predictors.names =
+ namesof("prob", .link, earg = .earg , tag = FALSE)
}
if (length(extra)) extra$w = w else extra = list(w = w)
if (!length(etastart)) {
- mustart.use = if (length(mustart.orig)) mustart.orig else mustart
- etastart = cbind(theta2eta(mustart.use, .link, earg = .earg ))
+ mustart.use = if (length(mustart.orig)) mustart.orig else mustart
+ etastart = cbind(theta2eta(mustart.use, .link, earg = .earg ))
}
mustart = NULL
}), list( .link = link, .earg = earg, .mv = mv ))),
@@ -1031,14 +1165,14 @@ dposbinom = function(x, size, prob, log = FALSE) {
dl.dmu = y / mymu - (1 - y) / (1 - mymu) -
(1 - mymu) * temp3 / temp1
- w * dl.dmu * dmu.deta
+ c(w) * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg, .mv = mv ))),
weight = eval(substitute(expression({
ed2l.dmu2 = 1 / (mymu * temp1) + 1 / temp2 -
mymu / (temp1 * temp2) -
(nvec-1) * temp3 / temp1 -
nvec * (temp2^(nvec-1)) / temp1^2
- wz = w * (dmu.deta^2) * ed2l.dmu2
+ wz = c(w) * ed2l.dmu2 * dmu.deta^2
wz
}), list( .link = link, .earg = earg, .mv = mv ))))
}
@@ -1057,18 +1191,12 @@ dposbinom = function(x, size, prob, log = FALSE) {
- labil = lability
- eabil = eability
- ldiff = ldifficulty
- ediff = edifficulty
-
-
if (mode(labil) != "character" && mode(labil) != "name")
labil = as.character(substitute(labil))
+
if (!is.list(eabil)) eabil = list()
if (!is.list(ediff)) ediff = list()
-
if (length(iability))
if (!is.Numeric(iability))
stop("bad input in argument 'iability'")
@@ -1076,6 +1204,11 @@ dposbinom = function(x, size, prob, log = FALSE) {
if (!is.Numeric(idifficulty))
stop("bad input in argument 'idifficulty'")
+ labil = lability
+ eabil = eability
+ ldiff = ldifficulty
+ ediff = edifficulty
+
new("vglmff",
blurb = c("Rasch model\n\n",
@@ -1138,8 +1271,6 @@ dposbinom = function(x, size, prob, log = FALSE) {
linkinv = eval(substitute(function(eta, extra = NULL) {
myprobs = eta2theta(eta, "logit", earg = list())
- print("head(myprobs)")
- print( head(myprobs) )
myprobs
}, list( .labil = labil, .eabil = eabil,
.ldiff = ldiff, .ediff = ediff ))),
diff --git a/R/family.qreg.R b/R/family.qreg.R
index 0547c4d..046f754 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -43,12 +43,14 @@ lms.yjn.control <- function(trace = TRUE, ...)
lmu = as.character(substitute(lmu))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
+
if (!is.list(elambda)) elambda = list()
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
+
if (!is.Numeric(ilambda))
stop("bad input for argument 'ilambda'")
- if (length(isigma) && !is.Numeric(isigma, posit = TRUE))
+ if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
if (length(expectiles) != 1 || !is.logical(expectiles))
stop("bad input for argument 'expectiles'")
@@ -194,12 +196,14 @@ lms.yjn.control <- function(trace = TRUE, ...)
lmu = as.character(substitute(lmu))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
+
if (!is.list(elambda)) elambda = list()
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
+
if (!is.Numeric(ilambda))
stop("bad input for argument 'ilambda'")
- if (length(isigma) && !is.Numeric(isigma, posit = TRUE))
+ if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
new("vglmff",
@@ -340,29 +344,35 @@ lms.yjn.control <- function(trace = TRUE, ...)
dy.dpsi.yeojohnson = function(psi, lambda) {
L = max(length(psi), length(lambda))
- psi = rep(psi, len=L); lambda = rep(lambda, len=L);
+ psi = rep(psi, length.out = L); lambda = rep(lambda, length.out = L);
ifelse(psi>0, (1 + psi * lambda)^(1/lambda - 1),
(1 - (2-lambda) * psi)^((lambda - 1)/(2-lambda)))
}
+
dyj.dy.yeojohnson = function(y, lambda) {
L = max(length(y), length(lambda))
- y = rep(y, len=L); lambda = rep(lambda, len=L);
+ y = rep(y, length.out = L); lambda = rep(lambda, length.out = L);
ifelse(y>0, (1 + y)^(lambda - 1), (1 - y)^(1 - lambda))
}
+
yeo.johnson = function(y, lambda, derivative = 0,
- epsilon = sqrt(.Machine$double.eps), inverse= FALSE)
+ epsilon = sqrt(.Machine$double.eps),
+ inverse = FALSE)
{
- if (!is.Numeric(derivative, allow = 1, integ = TRUE) || derivative<0)
+ if (!is.Numeric(derivative, allowable.length = 1,
+ integer.valued = TRUE) ||
+ derivative < 0)
stop("argument 'derivative' must be a non-negative integer")
+
ans = y
- if (!is.Numeric(epsilon, allow = 1, posit = TRUE))
+ if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE))
stop("argument 'epsilon' must be a single positive number")
L = max(length(lambda), length(y))
- if (length(y) != L) y = rep(y, len=L)
- if (length(lambda) != L) lambda = rep(lambda, len=L) # lambda may be of length 1
+ if (length(y) != L) y = rep(y, length.out = L)
+ if (length(lambda) != L) lambda = rep(lambda, length.out = L) # lambda may be of length 1
if (inverse) {
if (derivative != 0)
@@ -409,16 +419,16 @@ dyj.dy.yeojohnson = function(y, lambda) {
dpsi.dlambda.yjn = function(psi, lambda, mymu, sigma,
derivative = 0, smallno=1.0e-8) {
- if (!is.Numeric(derivative, allow = 1, integ = TRUE) || derivative<0)
+ if (!is.Numeric(derivative, allowable.length = 1, integer.valued = TRUE) || derivative<0)
stop("argument 'derivative' must be a non-negative integer")
- if (!is.Numeric(smallno, allow = 1, posit = TRUE))
+ if (!is.Numeric(smallno, allowable.length = 1, positive = TRUE))
stop("argument 'smallno' must be a single positive number")
L = max(length(psi), length(lambda), length(mymu), length(sigma))
- if (length(psi) != L) psi = rep(psi, len=L)
- if (length(lambda) != L) lambda = rep(lambda, len=L)
- if (length(mymu) != L) mymu = rep(mymu, len=L)
- if (length(sigma) != L) sigma = rep(sigma, len=L)
+ if (length(psi) != L) psi = rep(psi, length.out = L)
+ if (length(lambda) != L) lambda = rep(lambda, length.out = L)
+ if (length(mymu) != L) mymu = rep(mymu, length.out = L)
+ if (length(sigma) != L) sigma = rep(sigma, length.out = L)
answer = matrix(as.numeric(NA), L, derivative+1)
CC = psi >= 0
@@ -584,12 +594,14 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
lmu = as.character(substitute(lmu))
if (mode(lsigma) != "character" && mode(lsigma) != "name")
lsigma = as.character(substitute(lsigma))
+
if (!is.list(elambda)) elambda = list()
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
if (!is.Numeric(ilambda))
+
stop("bad input for argument 'ilambda'")
- if (length(isigma) && !is.Numeric(isigma, posit = TRUE))
+ if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
new("vglmff",
@@ -627,7 +639,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
y=y.tx, w = w, df = .dfmu.init)
c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
- rep(weighted.mean(y, w), len = n)
+ rep(weighted.mean(y, w), length.out = n)
}
sigma.init = if (!is.Numeric(.isigma)) {
@@ -811,7 +823,7 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
y = y.tx, w = w, df = .dfmu.init)
fv.init = c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
} else {
- fv.init = rep(weighted.mean(y, w), len = n)
+ fv.init = rep(weighted.mean(y, w), length.out = n)
}
sigma.init = if (!is.Numeric(.isigma)) {
@@ -1090,9 +1102,9 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
{
- if (!is.Numeric(w.aml, posit = TRUE))
+ if (!is.Numeric(w.aml, positive = TRUE))
stop("argument 'w.aml' must be a vector of positive values")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1, 2 or 3")
@@ -1123,7 +1135,7 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
predictors.names = c(namesof(
paste("expectile(",y.names,")", sep = ""), .lexpectile,
@@ -1231,8 +1243,9 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlpoisson <- function(w.aml = 1, parallel = FALSE, imethod = 1,
digw = 4, link = "loge", earg = list())
{
- if (!is.Numeric(w.aml, posit = TRUE))
+ if (!is.Numeric(w.aml, positive = TRUE))
stop("'w.aml' must be a vector of positive values")
+
if (mode(link)!= "character" && mode(link)!= "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
@@ -1255,7 +1268,7 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
extra$individual = FALSE
predictors.names = c(namesof(paste("expectile(",y.names,")", sep = ""),
.link, earg = .earg, tag = FALSE))
@@ -1363,7 +1376,7 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
link = "logit", earg = list())
{
- if (!is.Numeric(w.aml, posit = TRUE))
+ if (!is.Numeric(w.aml, positive = TRUE))
stop("'w.aml' must be a vector of positive values")
if (mode(link)!= "character" && mode(link)!= "name")
link = as.character(substitute(link))
@@ -1416,7 +1429,7 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
extra$individual = FALSE
predictors.names =
c(namesof(paste("expectile(", y.names, ")", sep = ""),
@@ -1483,7 +1496,8 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
-amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+amlexponential.deviance = function(mu, y, w, residuals = FALSE,
+ eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1507,18 +1521,24 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NUL
}
+
+
amlexponential <- function(w.aml = 1, parallel = FALSE, imethod = 1,
digw = 4, link = "loge", earg = list())
{
- if (!is.Numeric(w.aml, posit = TRUE))
- stop("'w.aml' must be a vector of positive values")
+ if (!is.Numeric(w.aml, positive = TRUE))
+ stop("'w.aml' must be a vector of positive values")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1, 2 or 3")
+
if (mode(link)!= "character" && mode(link)!= "name")
link = as.character(substitute(link))
+
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 3) stop("argument 'imethod' must be 1, 2 or 3")
- y.names = paste("w.aml = ", round(w.aml, dig=digw), sep = "")
+ y.names = paste("w.aml = ", round(w.aml, digits = digw), sep = "")
predictors.names = c(namesof(
paste("expectile(", y.names,")", sep = ""), link, earg = earg))
predictors.names = paste(predictors.names, collapse = ", ")
@@ -1543,7 +1563,7 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NUL
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, dig = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
extra$individual = FALSE
predictors.names = c(namesof(
paste("expectile(",y.names,")", sep = ""), .link, earg = .earg, tag = FALSE))
@@ -1619,9 +1639,9 @@ dalap = function(x, location = 0, scale = 1, tau = 0.5,
rm(log)
NN = max(length(x), length(location), length(scale), length(kappa))
- location = rep(location, len = NN); scale= rep(scale, len = NN)
- kappa = rep(kappa, len = NN); x = rep(x, len = NN)
- tau = rep(tau, len = NN)
+ location = rep(location, length.out = NN); scale = rep(scale, length.out = NN)
+ kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
logconst = 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
exponent = -(sqrt(2) / scale) * abs(x - location) *
@@ -1637,11 +1657,11 @@ dalap = function(x, location = 0, scale = 1, tau = 0.5,
ralap = function(n, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- location = rep(location, len=use.n); scale= rep(scale, len=use.n)
- tau = rep(tau, len=use.n); kappa = rep(kappa, len=use.n);
+ location = rep(location, length.out = use.n); scale = rep(scale, length.out = use.n)
+ tau = rep(tau, length.out = use.n); kappa = rep(kappa, length.out = use.n);
ans = location + scale *
log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
@@ -1653,9 +1673,9 @@ ralap = function(n, location = 0, scale = 1, tau = 0.5,
palap = function(q, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
NN = max(length(q), length(location), length(scale), length(kappa))
- location = rep(location, len = NN); scale= rep(scale, len = NN)
- kappa = rep(kappa, len = NN); q= rep(q, len = NN)
- tau = rep(tau, len = NN);
+ location = rep(location, length.out = NN); scale = rep(scale, length.out = NN)
+ kappa = rep(kappa, length.out = NN); q= rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
exponent = -(sqrt(2) / scale) * abs(q - location) *
ifelse(q >= location, kappa, 1/kappa)
@@ -1673,9 +1693,9 @@ palap = function(q, location = 0, scale = 1, tau = 0.5,
qalap = function(p, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
NN = max(length(p), length(location), length(scale), length(kappa))
- location = rep(location, len = NN); scale= rep(scale, len = NN)
- kappa = rep(kappa, len = NN); p = rep(p, len = NN)
- tau = rep(tau, len = NN)
+ location = rep(location, length.out = NN); scale = rep(scale, length.out = NN)
+ kappa = rep(kappa, length.out = NN); p = rep(p, length.out = NN)
+ tau = rep(tau, length.out = NN)
ans = p
temp5 = kappa^2 / (1 + kappa^2)
index1 = (p <= temp5)
@@ -1698,8 +1718,8 @@ qalap = function(p, location = 0, scale = 1, tau = 0.5,
if (FALSE)
dqregal = function(x, tau = 0.5, location = 0, scale = 1) {
- if (!is.Numeric(scale, posit = TRUE)) stop("'scale' must be positive")
- if (!is.Numeric(tau, posit = TRUE) || max(tau) >= 1)
+ if (!is.Numeric(scale, positive = TRUE)) stop("'scale' must be positive")
+ if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
stop("'tau' must have values in (0,1)")
const = tau * (1-tau) / scale
const * exp(-rho1check((x-location)/scale, tau=tau))
@@ -1709,12 +1729,13 @@ dqregal = function(x, tau = 0.5, location = 0, scale = 1) {
if (FALSE)
rqregal = function(n, tau = 0.5, location = 0, scale = 1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit = TRUE)) stop("'scale' must be positive")
- if (!is.Numeric(tau, posit = TRUE) || max(tau) >= 1)
+ if (!is.Numeric(scale, positive = TRUE)) stop("'scale' must be positive")
+ if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
stop("'tau' must have values in (0,1)")
- location = rep(location, len = n); scale= rep(scale, len = n)
+ location = rep(location, length.out = n);
+ scale = rep(scale, length.out = n)
r = runif(n)
location - sign(r-tau) * scale * log(2*ifelse(r < tau, r, 1-r))
}
@@ -1729,12 +1750,12 @@ pqregal = function(q, tau = 0.5, location = 0, scale = 1) {
stop("bad input for argument 'q'")
if (!is.Numeric(location))
stop("bad input for argument 'location'")
- if (!is.Numeric(scale, posit = TRUE)) stop("'scale' must be positive")
- if (!is.Numeric(tau, posit = TRUE) || max(tau) >= 1)
+ if (!is.Numeric(scale, positive = TRUE)) stop("'scale' must be positive")
+ if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
stop("'tau' must have values in (0,1)")
N = max(length(q), length(tau), length(location), length(scale))
- location = rep(location, len = N); scale= rep(scale, len = N)
- tau = rep(tau, len = N); q= rep(q, len = N)
+ location = rep(location, length.out = N); scale = rep(scale, length.out = N)
+ tau = rep(tau, length.out = N); q= rep(q, length.out = N)
ans = tau * exp(-(location - q) * (1 - tau))
index1 = (q > location)
ans[index1] = (1 - (1-tau) * exp(-tau * (q - location)))[index1]
@@ -1750,11 +1771,14 @@ qregal = function(tau = c(0.25, 0.5, 0.75),
parallel = FALSE, imethod = 1, digt = 4) {
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(tau, posit = TRUE) || max(tau) >= 1)
+ if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
stop("bad input for argument 'tau'")
+
if (!is.list(elocation)) elocation = list()
+
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
if (!is.list(escale)) escale = list()
@@ -1774,7 +1798,7 @@ qregal = function(tau = c(0.25, 0.5, 0.75),
extra$M = M = 1 + length(extra$tau)
extra$n = n
extra$y.names = y.names =
- paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
predictors.names = c(
namesof("scale", .lscale, earg = .escale, tag = FALSE),
@@ -1790,9 +1814,9 @@ qregal = function(tau = c(0.25, 0.5, 0.75),
locat.init = if (length(.ilocat)) {
matrix( .ilocat, n, M-1, byrow = TRUE)
} else {
- rep(locat.init, len = n)
+ rep(locat.init, length.out = n)
}
- scale.init = rep(1.0, len = n)
+ scale.init = rep(1.0, length.out = n)
etastart = cbind(
theta2eta(scale.init, .lscale, earg = .escale),
matrix(
@@ -1877,14 +1901,15 @@ qregal = function(tau = c(0.25, 0.5, 0.75),
rloglap = function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
- location.ald = rep(location.ald, len=use.n);
- scale.ald= rep(scale.ald, len=use.n)
- tau = rep(tau, len=use.n);
- kappa = rep(kappa, len=use.n);
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ location.ald = rep(location.ald, length.out = use.n);
+ scale.ald= rep(scale.ald, length.out = use.n)
+ tau = rep(tau, length.out = use.n);
+ kappa = rep(kappa, length.out = use.n);
ans = exp(location.ald) *
- (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
+ (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
ans[!indexTF] = NaN
ans
@@ -1896,15 +1921,19 @@ dloglap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
rm(log)
- NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
- location = rep(location.ald, len = NN); scale= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); x = rep(x, len = NN)
- tau = rep(tau, len = NN)
+ NN = max(length(x), length(location.ald),
+ length(scale.ald), length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
Alpha = sqrt(2) * kappa / scale.ald
Beta = sqrt(2) / (scale.ald * kappa)
Delta = exp(location.ald)
- exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) * (log(x) - location.ald)
+ exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) *
+ (log(x) - location.ald)
logdensity = -location.ald + log(Alpha) + log(Beta) -
log(Alpha + Beta) + exponent
indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
@@ -1916,10 +1945,13 @@ dloglap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
qloglap = function(p, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(p), length(location.ald), length(scale.ald), length(kappa))
- location = rep(location.ald, len = NN); scale= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); p = rep(p, len = NN)
- tau = rep(tau, len = NN)
+ NN = max(length(p), length(location.ald), length(scale.ald),
+ length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ p = rep(p, length.out = NN)
+ tau = rep(tau, length.out = NN)
Alpha = sqrt(2) * kappa / scale.ald
Beta = sqrt(2) / (scale.ald * kappa)
@@ -1942,10 +1974,13 @@ qloglap = function(p, location.ald = 0, scale.ald = 1,
ploglap = function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald), length(kappa))
- location = rep(location.ald, len = NN); scale = rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); q = rep(q, len = NN)
- tau = rep(tau, len = NN)
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ q = rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN)
Alpha = sqrt(2) * kappa / scale.ald
Beta = sqrt(2) / (scale.ald * kappa)
@@ -1978,9 +2013,9 @@ dlogitlap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
rm(log)
NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
- location = rep(location.ald, len = NN); scale= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); x = rep(x, len = NN)
- tau = rep(tau, len = NN)
+ location = rep(location.ald, length.out = NN); scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
Alpha = sqrt(2) * kappa / scale.ald
Beta = sqrt(2) / (scale.ald * kappa)
@@ -2015,9 +2050,9 @@ plogitlap = function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau)), earg = list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, len = NN); scale.ald= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); q= rep(q, len = NN)
- tau = rep(tau, len = NN);
+ location.ald = rep(location.ald, length.out = NN); scale.ald= rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); q= rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
indexTF = (q > 0) & (q < 1)
qqq = logit(q[indexTF], earg = earg)
@@ -2046,9 +2081,9 @@ dprobitlap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
rm(log)
NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
- location.ald = rep(location.ald, len = NN); scale.ald= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); x = rep(x, len = NN)
- tau = rep(tau, len = NN)
+ location.ald = rep(location.ald, length.out = NN); scale.ald= rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
logdensity = x * NaN
index1 = (x > 0) & (x < 1)
@@ -2104,9 +2139,9 @@ pprobitlap = function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau)), earg = list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, len = NN); scale.ald= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); q= rep(q, len = NN)
- tau = rep(tau, len = NN);
+ location.ald = rep(location.ald, length.out = NN); scale.ald= rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); q= rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
indexTF = (q > 0) & (q < 1)
qqq = probit(q[indexTF], earg = earg)
@@ -2134,9 +2169,9 @@ dclogloglap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
rm(log)
NN = max(length(x), length(location.ald), length(scale.ald), length(kappa))
- location.ald = rep(location.ald, len = NN); scale.ald= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); x = rep(x, len = NN)
- tau = rep(tau, len = NN)
+ location.ald = rep(location.ald, length.out = NN); scale.ald= rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
logdensity = x * NaN
index1 = (x > 0) & (x < 1)
@@ -2191,9 +2226,9 @@ pclogloglap = function(q, location.ald = 0, scale.ald = 1,
tau = 0.5, kappa = sqrt(tau/(1-tau)), earg = list()) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
- location.ald = rep(location.ald, len = NN); scale.ald= rep(scale.ald, len = NN)
- kappa = rep(kappa, len = NN); q= rep(q, len = NN)
- tau = rep(tau, len = NN);
+ location.ald = rep(location.ald, length.out = NN); scale.ald= rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); q= rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
indexTF = (q > 0) & (q < 1)
qqq = cloglog(q[indexTF], earg = earg)
@@ -2238,18 +2273,18 @@ alaplace2.control <- function(maxit = 100, ...)
elocat <- elocation
ilocat <- ilocation
- if (!is.Numeric(kappa, posit = TRUE))
+ if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1, 2 or ... 4")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
if (length(zero) &&
- !(is.Numeric(zero, integer = TRUE) ||
+ !(is.Numeric(zero, integer.valued = TRUE) ||
is.character(zero )))
stop("bad input for argument 'zero'")
@@ -2357,7 +2392,7 @@ alaplace2.control <- function(maxit = 100, ...)
extra$tau.names = tau.names =
- paste("(tau = ", round(extra$tau, dig = .digt), ")", sep = "")
+ paste("(tau = ", round(extra$tau, digits = .digt), ")", sep = "")
extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
if (is.null(Y.names) || any(Y.names == ""))
extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
@@ -2568,22 +2603,22 @@ alaplace1.control <- function(maxit = 100, ...)
- if (!is.Numeric(kappa, posit = TRUE))
+ if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1, 2 or ... 4")
if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (!is.Numeric(Scale.arg, posit = TRUE))
+ if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
@@ -2663,7 +2698,7 @@ alaplace1.control <- function(maxit = 100, ...)
extra$tau.names = tau.names =
- paste("(tau = ", round(extra$tau, dig = .digt), ")", sep = "")
+ paste("(tau = ", round(extra$tau, digits = .digt), ")", sep = "")
extra$Y.names = Y.names = if (ncoly > 1) dimnames(y)[[2]] else "y"
if (is.null(Y.names) || any(Y.names == ""))
extra$Y.names = Y.names = paste("y", 1:ncoly, sep = "")
@@ -2824,12 +2859,12 @@ alaplace3.control <- function(maxit = 100, ...)
if (mode(lkappa) != "character" && mode(lkappa) != "name")
lkappa = as.character(substitute(lkappa))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
@@ -2857,8 +2892,9 @@ alaplace3.control <- function(maxit = 100, ...)
namesof("scale", .lscale, earg = .escale, tag = FALSE),
namesof("kappa", .lkappa, earg = .ekappa, tag = FALSE))
if (!length(etastart)) {
- kappa.init = if (length( .ikappa)) rep( .ikappa, len = n) else
- rep( 1.0, len = n)
+ kappa.init = if (length( .ikappa))
+ rep( .ikappa, length.out = n) else
+ rep( 1.0, length.out = n)
if ( .imethod == 1) {
locat.init = median(y)
scale.init = sqrt(var(y) / 2)
@@ -2866,10 +2902,12 @@ alaplace3.control <- function(maxit = 100, ...)
locat.init = y
scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init = if (length( .ilocat)) rep( .ilocat, len = n) else
- rep(locat.init, len = n)
- scale.init = if (length( .iscale)) rep( .iscale, len = n) else
- rep(scale.init, len = n)
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = n) else
+ rep(locat.init, length.out = n)
+ scale.init = if (length( .iscale))
+ rep( .iscale, length.out = n) else
+ rep(scale.init, length.out = n)
etastart =
cbind(theta2eta(locat.init, .llocat, earg = .elocat),
theta2eta(scale.init, .lscale, earg = .escale),
@@ -2967,27 +3005,32 @@ dlaplace = function(x, location = 0, scale = 1, log = FALSE) {
}
plaplace = function(q, location = 0, scale = 1) {
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
zedd = (q-location) / scale
L = max(length(q), length(location), length(scale))
- q = rep(q, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ q = rep(q, length.out = L); location = rep(location, length.out = L);
+ scale = rep(scale, length.out = L)
ifelse(q < location, 0.5*exp(zedd), 1-0.5*exp(-zedd))
}
qlaplace = function(p, location = 0, scale = 1) {
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
L = max(length(p), length(location), length(scale))
- p = rep(p, len=L); location = rep(location, len=L); scale= rep(scale, len=L)
+ p = rep(p, length.out = L); location = rep(location, length.out = L);
+ scale = rep(scale, length.out = L)
location - sign(p-0.5) * scale * log(2*ifelse(p < 0.5, p, 1-p))
}
rlaplace = function(n, location = 0, scale = 1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit = TRUE)) stop("'scale' must be positive")
- location = rep(location, len = n); scale= rep(scale, len = n)
+ if (!is.Numeric(n, positive = TRUE,
+ integer.valued = TRUE, allowable.length = 1))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("'scale' must be positive")
+ location = rep(location, length.out = n);
+ scale = rep(scale, length.out = n)
r = runif(n)
location - sign(r-0.5) * scale * log(2*ifelse(r < 0.5, r, 1-r))
}
@@ -3001,13 +3044,16 @@ rlaplace = function(n, location = 0, scale = 1) {
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3) stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
new("vglmff",
@@ -3038,10 +3084,12 @@ rlaplace = function(n, location = 0, scale = 1) {
locat.init = median(y)
scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init = if (length( .ilocat)) rep( .ilocat, len = n) else
- rep(locat.init, len = n)
- scale.init = if (length( .iscale)) rep( .iscale, len = n) else
- rep(scale.init, len = n)
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = n) else
+ rep(locat.init, length.out = n)
+ scale.init = if (length( .iscale))
+ rep( .iscale, length.out = n) else
+ rep(scale.init, length.out = n)
etastart =
cbind(theta2eta(locat.init, .llocat, earg = .elocat),
theta2eta(scale.init, .lscale, earg = .escale))
@@ -3106,12 +3154,14 @@ fff.control <- function(save.weight = TRUE, ...)
imethod = 1, zero = NULL) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10)
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 10)
stop("argument 'nsimEIM' should be an integer greater than 10")
ncp = 0
if (any(ncp != 0)) warning("not sure about ncp != 0 wrt dl/dtheta")
@@ -3146,10 +3196,12 @@ fff.control <- function(save.weight = TRUE, ...)
var.est = summy[5] - summy[2]
df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
}
- df1.init = if (length( .idf1)) rep( .idf1, len = n) else
- rep(df1.init, len = n)
- df2.init = if (length( .idf2)) rep( .idf2, len = n) else
- rep(1, len = n)
+ df1.init = if (length( .idf1))
+ rep( .idf1, length.out = n) else
+ rep(df1.init, length.out = n)
+ df2.init = if (length( .idf2))
+ rep( .idf2, length.out = n) else
+ rep(1, length.out = n)
etastart = cbind(theta2eta(df1.init, .link, earg = .earg),
theta2eta(df2.init, .link, earg = .earg))
}
@@ -3251,7 +3303,7 @@ fff.control <- function(save.weight = TRUE, ...)
is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
if (is.factor(y)) y = y != levels(y)[1]
- nn = rep(1, len = n)
+ nn = rep(1, length.out = n)
if (!all(y >= 0 & y <= 1))
stop("response values must be in [0, 1]")
mustart = (0.5 + w * y) / (1 + w)
@@ -3274,8 +3326,9 @@ fff.control <- function(save.weight = TRUE, ...)
extra$Dvector = .D
extra$Nunknown = length(extra$Nvector) == 0
if (!length(etastart)) {
- init.prob = if (length( .iprob)) rep( .iprob, len = n) else
- mustart
+ init.prob = if (length( .iprob))
+ rep( .iprob, length.out = n) else
+ mustart
etastart = matrix(init.prob, n, ncol(cbind(y )))
}
@@ -3370,9 +3423,9 @@ dbenini = function(x, shape, y0, log = FALSE) {
rm(log)
N = max(length(x), length(shape), length(y0))
- x = rep(x, len = N); shape = rep(shape, len = N); y0 = rep(y0, len = N);
+ x = rep(x, length.out = N); shape = rep(shape, length.out = N); y0 = rep(y0, length.out = N);
- logdensity = rep(log(0), len = N)
+ logdensity = rep(log(0), length.out = N)
xok = (x > y0)
tempxok = log(x[xok]/y0[xok])
logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
@@ -3383,12 +3436,12 @@ dbenini = function(x, shape, y0, log = FALSE) {
pbenini = function(q, shape, y0) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit = TRUE))
+ if (!is.Numeric(y0, positive = TRUE))
stop("bad input for argument 'y0'")
N = max(length(q), length(shape), length(y0))
- q = rep(q, len = N); shape = rep(shape, len = N); y0 = rep(y0, len = N);
+ q = rep(q, length.out = N); shape = rep(shape, length.out = N); y0 = rep(y0, length.out = N);
ans = y0 * 0
ok = q > y0
ans[ok] = -expm1(-shape[ok] * (log(q[ok]/y0[ok]))^2)
@@ -3396,21 +3449,21 @@ pbenini = function(q, shape, y0) {
}
qbenini = function(p, shape, y0) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit = TRUE))
+ if (!is.Numeric(y0, positive = TRUE))
stop("bad input for argument 'y0'")
y0 * exp(sqrt(-log1p(-p) / shape))
}
rbenini = function(n, shape, y0) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(y0, posit = TRUE))
+ if (!is.Numeric(y0, positive = TRUE))
stop("bad input for argument 'y0'")
y0 * exp(sqrt(-log(runif(n)) / shape))
}
@@ -3420,10 +3473,12 @@ rbenini = function(n, shape, y0) {
ishape = NULL, imethod = 1) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(y0, allow = 1, posit = TRUE))
+ if (!is.Numeric(y0, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'y0'")
+
if (!is.list(earg)) earg = list()
new("vglmff",
@@ -3447,8 +3502,9 @@ rbenini = function(n, shape, y0) {
} else {
shape.init = median(-log1p(-probs) / (log(qofy))^2)
}
- shape.init = if (length( .ishape)) rep( .ishape, len = n) else
- rep(shape.init, len = n)
+ shape.init = if (length( .ishape))
+ rep( .ishape, length.out = n) else
+ rep(shape.init, length.out = n)
etastart = cbind(theta2eta(shape.init, .lshape, earg = .earg))
}
}), list( .imethod = imethod, .ishape=ishape, .lshape = lshape, .earg = earg,
@@ -3457,8 +3513,8 @@ rbenini = function(n, shape, y0) {
shape = eta2theta(eta, .lshape, earg = .earg)
temp = 1/(4*shape)
extra$y0 * exp(temp) *
- ((sqrt(pi) * (1 - pgamma(temp, 0.5 ))) / (2*sqrt(shape)) +
- 1 - pgamma(temp, 1))
+ ((sqrt(pi) * pgamma(temp, 0.5, lower.tail = FALSE)) / (2*sqrt(shape)) +
+ pgamma(temp, 1.0, lower.tail = FALSE))
}, list( .lshape = lshape, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(shape = .lshape)
@@ -3500,7 +3556,7 @@ dpolono = function(x, meanlog = 0, sdlog = 1, bigx = Inf, ...) {
stop("bad input for argument 'x'")
if (!is.Numeric(meanlog))
stop("bad input for argument 'meanlog'")
- if (!is.Numeric(sdlog, posit = TRUE))
+ if (!is.Numeric(sdlog, positive = TRUE))
stop("bad input for argument 'sdlog'")
if (length(bigx) != 1)
@@ -3509,7 +3565,9 @@ dpolono = function(x, meanlog = 0, sdlog = 1, bigx = Inf, ...) {
warning("argument 'bigx' is probably too small")
N = max(length(x), length(meanlog), length(sdlog))
- x = rep(x, len = N); meanlog = rep(meanlog, len = N); sdlog = rep(sdlog, len = N)
+ x = rep(x, length.out = N);
+ meanlog = rep(meanlog, length.out = N);
+ sdlog = rep(sdlog, length.out = N)
ans = x * 0
integrand = function(t, x, meanlog, sdlog)
exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
@@ -3550,9 +3608,9 @@ dpolono = function(x, meanlog = 0, sdlog = 1, bigx = Inf, ...) {
exp(-0.5 * z^2) / (sqrt(2 * pi) * sdlog * x)
} else
integrate( function(t) exp(t * x - exp(t) -
- 0.5 * ((t - meanlog) / sdlog)^2),
- lower = -Inf, upper = Inf, ...)$value / (sqrt(2 * pi) *
- sdlog * exp(lgamma(x + 1.0)))
+ 0.5 * ((t - meanlog) / sdlog)^2),
+ lower = -Inf, upper = Inf, ...)$value / (sqrt(2 * pi) *
+ sdlog * exp(lgamma(x + 1.0)))
}, x, meanlog, sdlog, ...)
}
@@ -3584,8 +3642,8 @@ ppolono <- function(q, meanlog = 0, sdlog = 1,
rpolono = function(n, meanlog = 0, sdlog = 1) {
- lambda = rlnorm(n=n, meanlog=meanlog, sdlog = sdlog)
- rpois(n=n, lambda=lambda)
+ lambda = rlnorm(n = n, meanlog = meanlog, sdlog = sdlog)
+ rpois(n = n, lambda = lambda)
}
@@ -3604,12 +3662,12 @@ dtriangle = function(x, theta, lower = 0, upper = 1, log = FALSE) {
rm(log)
N = max(length(x), length(theta), length(lower), length(upper))
- x = rep(x, len = N); lower = rep(lower, len = N); upper = rep(upper, len = N);
- theta = rep(theta, len = N)
+ x = rep(x, length.out = N); lower = rep(lower, length.out = N); upper = rep(upper, length.out = N);
+ theta = rep(theta, length.out = N)
denom1 = ((upper-lower)*(theta-lower))
denom2 = ((upper-lower)*(upper-theta))
- logdensity = rep(log(0), len = N)
+ logdensity = rep(log(0), length.out = N)
xok.neg = (lower < x) & (x <= theta)
xok.pos = (theta <= x) & (x < upper)
logdensity[xok.neg] = log(2 * (x[xok.neg]-lower[xok.neg]) / denom1[xok.neg])
@@ -3622,7 +3680,7 @@ dtriangle = function(x, theta, lower = 0, upper = 1, log = FALSE) {
rtriangle = function(n, theta, lower = 0, upper = 1) {
- if (!is.Numeric(n, integ = TRUE,allow = 1))
+ if (!is.Numeric(n, integer.valued = TRUE,allowable.length = 1))
stop("bad input for argument 'n'")
if (!is.Numeric(theta))
stop("bad input for argument 'theta'")
@@ -3633,8 +3691,8 @@ rtriangle = function(n, theta, lower = 0, upper = 1) {
if (!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
N = n
- lower = rep(lower, len = N); upper = rep(upper, len = N);
- theta = rep(theta, len = N)
+ lower = rep(lower, length.out = N); upper = rep(upper, length.out = N);
+ theta = rep(theta, length.out = N)
t1 = sqrt(runif(n))
t2 = sqrt(runif(n))
ifelse(runif(n) < (theta-lower)/(upper-lower),
@@ -3644,7 +3702,7 @@ rtriangle = function(n, theta, lower = 0, upper = 1) {
qtriangle = function(p, theta, lower = 0, upper = 1) {
- if (!is.Numeric(p, posit = TRUE))
+ if (!is.Numeric(p, positive = TRUE))
stop("bad input for argument 'p'")
if (!is.Numeric(theta))
stop("bad input for argument 'theta'")
@@ -3656,8 +3714,8 @@ qtriangle = function(p, theta, lower = 0, upper = 1) {
stop("lower < theta < upper values are required")
N = max(length(p), length(theta), length(lower), length(upper))
- p = rep(p, len = N); lower = rep(lower, len = N); upper = rep(upper, len = N);
- theta = rep(theta, len = N)
+ p = rep(p, length.out = N); lower = rep(lower, length.out = N); upper = rep(upper, length.out = N);
+ theta = rep(theta, length.out = N)
bad = (p < 0) | (p > 1)
if (any(bad))
@@ -3694,8 +3752,9 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
stop("lower < theta < upper values are required")
N = max(length(q), length(theta), length(lower), length(upper))
- q = rep(q, len = N); lower = rep(lower, len = N); upper = rep(upper, len = N);
- theta = rep(theta, len = N)
+ q = rep(q, length.out = N); lower = rep(lower, length.out = N);
+ upper = rep(upper, length.out = N);
+ theta = rep(theta, length.out = N)
ans = q * 0
qstar = (q - lower)^2 / ((upper-lower) * (theta-lower))
@@ -3738,8 +3797,8 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
y = as.numeric(y)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- extra$lower = rep( .lower, len = n)
- extra$upper = rep( .upper, len = n)
+ extra$lower = rep( .lower, length.out = n)
+ extra$upper = rep( .upper, length.out = n)
if (any(y <= extra$lower | y >= extra$upper))
stop("some y values in [lower,upper] detected")
@@ -3834,26 +3893,26 @@ loglaplace1.control <- function(maxit = 300, ...)
stop("bad input for argument 'minquantile'")
if (length(maxquantile) != 1)
stop("bad input for argument 'maxquantile'")
- if (!is.Numeric(rep0, posit = TRUE, allow = 1) || rep0 > 1)
+ if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) || rep0 > 1)
stop("bad input for argument 'rep0'")
- if (!is.Numeric(kappa, posit = TRUE))
+ if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1, 2 or ... 4")
if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
if (length(zero) &&
- !(is.Numeric(zero, integer = TRUE, posit = TRUE) || is.character(zero )))
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) || is.character(zero )))
stop("bad input for argument 'zero'")
- if (!is.Numeric(Scale.arg, posit = TRUE))
+ if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
@@ -3862,7 +3921,8 @@ loglaplace1.control <- function(maxit = 300, ...)
stop("bad input for argument 'fittedMean'")
mystring0 = namesof("location", llocation, earg = elocation)
- mychars = substring(mystring0, fi=1:nchar(mystring0), la=1:nchar(mystring0))
+ mychars = substring(mystring0, first = 1:nchar(mystring0),
+ last = 1:nchar(mystring0))
mychars[nchar(mystring0)] = ", inverse = TRUE)"
mystring1 = paste(mychars, collapse = "")
@@ -3888,7 +3948,7 @@ loglaplace1.control <- function(maxit = 300, ...)
stop("response must be a vector or a one-column matrix")
extra$n = n
extra$y.names = y.names =
- paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
predictors.names = namesof(paste("quantile(", y.names, ")", sep = ""),
.llocat, earg = .elocat, tag = FALSE)
@@ -3898,13 +3958,13 @@ loglaplace1.control <- function(maxit = 300, ...)
if (min(y) < 0)
stop("negative response values detected")
if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
- stop("sample proportion of 0s == ", round(prop.0., dig=4),
- " > minimum 'tau' value. Choose larger values for 'tau'.")
+ stop("sample proportion of 0s == ", round(prop.0., digits = 4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
if ( .rep0 == 0.5 &&
(ave.tau <- (weighted.mean(1*(y <= 0), w) +
weighted.mean(1*(y <= 1), w))/2) >= min(extra$tau))
warning("the minimum 'tau' value should be greater than ",
- round(ave.tau, dig=4))
+ round(ave.tau, digits = 4))
}
if (!length(etastart)) {
@@ -3922,8 +3982,9 @@ loglaplace1.control <- function(maxit = 300, ...)
use.this = weighted.mean(y, w)
locat.init = (1- .sinit)*y + .sinit * use.this
}
- locat.init = if (length( .ilocat)) rep( .ilocat, len = M) else
- rep(locat.init, len = M)
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = M) else
+ rep(locat.init, length.out = M)
locat.init = matrix(locat.init, n, M, byrow = TRUE)
if ( .llocat == "loge")
locat.init = abs(locat.init)
@@ -4043,11 +4104,11 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
warning("it is best to use loglaplace1()")
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 10))
+ (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 10))
stop("argument 'nsimEIM' should be an integer greater than 10")
- if (!is.Numeric(rep0, posit = TRUE, allow = 1) || rep0 > 1)
+ if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) || rep0 > 1)
stop("bad input for argument 'rep0'")
- if (!is.Numeric(kappa, posit = TRUE))
+ if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
@@ -4056,18 +4117,18 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4) stop("argument 'imethod' must be 1, 2 or ... 4")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
if (length(zero) &&
- !(is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
is.character(zero )))
stop("bad input for argument 'zero'")
if (!is.logical(sameScale) || length(sameScale) != 1)
@@ -4125,7 +4186,7 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
extra$M = M = 2 * length(extra$kappa)
extra$n = n
extra$y.names = y.names =
- paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
predictors.names =
c(namesof(paste("quantile(", y.names, ")", sep = ""),
@@ -4153,11 +4214,13 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
locat.init.y = (1- .sinit)*y + .sinit * use.this
scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init.y = if (length( .ilocat)) rep( .ilocat, len = n) else
- rep(locat.init.y, len = n)
+ locat.init.y = if (length( .ilocat))
+ rep( .ilocat, length.out = n) else
+ rep(locat.init.y, length.out = n)
locat.init.y = matrix(locat.init.y, n, M/2)
- scale.init = if (length( .iscale)) rep( .iscale, len = n) else
- rep(scale.init, len = n)
+ scale.init = if (length( .iscale))
+ rep( .iscale, length.out = n) else
+ rep(scale.init, length.out = n)
scale.init = matrix(scale.init, n, M/2)
etastart =
cbind(theta2eta(locat.init.y, .llocat, earg = .elocat),
@@ -4288,6 +4351,8 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
+
+
logitlaplace1.control <- function(maxit = 300, ...)
{
list(maxit = maxit)
@@ -4301,6 +4366,10 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
ymat
}
+
+
+
+
logitlaplace1 = function(tau = NULL,
llocation = "logit",
elocation = list(),
@@ -4312,29 +4381,29 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
rep01 = 0.5,
imethod = 1, zero = NULL) {
- if (!is.Numeric(rep01, posit = TRUE, allow = 1) || rep01 > 0.5)
+ if (!is.Numeric(rep01, positive = TRUE, allowable.length = 1) || rep01 > 0.5)
stop("bad input for argument 'rep01'")
- if (!is.Numeric(kappa, posit = TRUE))
+ if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1, 2 or ... 4")
if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
if (length(zero) &&
- !(is.Numeric(zero, integer = TRUE, posit = TRUE) ||
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
is.character(zero )))
stop("bad input for argument 'zero'")
- if (!is.Numeric(Scale.arg, posit = TRUE))
+ if (!is.Numeric(Scale.arg, positive = TRUE))
stop("bad input for argument 'Scale.arg'")
if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
@@ -4344,8 +4413,8 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
mystring0 = namesof("location", llocation, earg = elocation)
- mychars = substring(mystring0, fi = 1:nchar(mystring0),
- la = 1:nchar(mystring0))
+ mychars = substring(mystring0, first = 1:nchar(mystring0),
+ last = 1:nchar(mystring0))
mychars[nchar(mystring0)] = ", inverse = TRUE)"
mystring1 = paste(mychars, collapse = "")
@@ -4369,7 +4438,7 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
stop("response must be a vector or a one-column matrix")
extra$n = n
extra$y.names = y.names =
- paste("tau = ", round(extra$tau, dig = .digt), sep = "")
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
predictors.names =
namesof(paste("quantile(", y.names, ")", sep = ""),
@@ -4382,11 +4451,11 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
if (max(y) > 1)
stop("response values greater than 1 detected")
if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
- stop("sample proportion of 0s == ", round(prop.0., dig=4),
- " > minimum 'tau' value. Choose larger values for 'tau'.")
+ stop("sample proportion of 0s == ", round(prop.0., digits = 4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
if ((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
- stop("sample proportion of 1s == ", round(prop.1., dig=4),
- " < maximum 'tau' value. Choose smaller values for 'tau'.")
+ stop("sample proportion of 1s == ", round(prop.1., digits = 4),
+ " < maximum 'tau' value. Choose smaller values for 'tau'.")
if (!length(etastart)) {
if ( .imethod == 1) {
locat.init = quantile(rep(y, w), probs= extra$tau)
@@ -4401,8 +4470,9 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
}
- locat.init = if (length( .ilocat)) rep( .ilocat, len = M) else
- rep(locat.init, len = M)
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = M) else
+ rep(locat.init, length.out = M)
locat.init = matrix(locat.init, n, M, byrow = TRUE)
locat.init = abs(locat.init)
etastart =
@@ -4454,7 +4524,8 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
ymat = matrix(y, extra$n, extra$M)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w, rep01 = .rep01)
+ ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ rep01 = .rep01)
w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
if (residuals) {
stop("loglikelihood residuals not implemented yet")
@@ -4474,7 +4545,8 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
location.w = eta
kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w, rep01 = .rep01)
+ ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ rep01 = .rep01)
w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
zedd = abs(w.mat-location.w) / Scale.w
dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
diff --git a/R/family.quantal.R b/R/family.quantal.R
index 689e767..14c7821 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -24,7 +24,7 @@
fitted.type <- match.arg(fitted.type,
c("observed", "treatment", "control"),
- several.ok =TRUE)
+ several.ok = TRUE)
if (mode(link0) != "character" && mode(link0) != "name")
@@ -35,9 +35,9 @@
link1 <- as.character(substitute(link1))
if (!is.list(earg1)) earg1 = list()
- if (!is.Numeric(mux.offdiagonal, allow = 1) ||
+ if (!is.Numeric(mux.offdiagonal, allowable.length = 1) ||
mux.offdiagonal >= 1 ||
- mux.offdiagonal < 0)
+ mux.offdiagonal < 0)
stop("argument 'mux.offdiagonal' must be in the interval [0, 1)")
@@ -51,11 +51,11 @@
namesof("prob0", link0, earg = earg0), ", ",
namesof("prob1", link1, earg = earg1)),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero # ,
))),
- initialize = eval(substitute(expression({
+ initialize = eval(substitute(expression({
eval(binomialff(link = .link0)@initialize) # w, y, mustart are assigned
@@ -66,16 +66,16 @@
if (is.null(etastart)) {
prob0.init <- if (length( .iprob0 )) {
- rep( .iprob0, len = n)
- } else {
- mustart / 2
- }
+ rep( .iprob0, length.out = n)
+ } else {
+ mustart / 2
+ }
prob1.init <- if (length( .iprob1 )) {
- rep( .iprob1, len = n)
- } else {
- mustart / 2
- }
+ rep( .iprob1, length.out = n)
+ } else {
+ mustart / 2
+ }
mustart <- NULL
@@ -88,6 +88,7 @@
}), list( .link0 = link0, .earg0 = earg0,
.link1 = link1, .earg1 = earg1,
.iprob0 = iprob0, .iprob1 = iprob1 ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
@@ -112,13 +113,13 @@
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob0 <- eta2theta(eta[, 1], .link0, earg = .earg0 )
- prob1 <- eta2theta(eta[, 2], .link1, earg = .earg1 )
+ prob0 <- eta2theta(eta[, 1], .link0 , earg = .earg0 )
+ prob1 <- eta2theta(eta[, 2], .link1 , earg = .earg1 )
mymu = prob0 + (1 - prob0) * prob1
if (residuals) {
- w * (y / mymu - (1-y) / (1 - mymu))
+ w * (y / mymu - (1 - y) / (1 - mymu))
} else {
ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
@@ -171,7 +172,7 @@
dl.dprob1 * dprob1.deta)
}), list( .link0 = link0, .earg0 = earg0,
.link1 = link1, .earg1 = earg1 ))),
- weight = eval(substitute(expression({
+ weight = eval(substitute(expression({
ed2l.dmu2 <- 1 / (mymu * (1-mymu))
@@ -195,10 +196,6 @@
ed2l.dprob1prob2 * dprob1.deta * dprob0.deta)
- if (FALSE)
- wz <- cbind(od2l.dprob02 * dprob0.deta^2,
- od2l.dprob12 * dprob1.deta^2,
- od2l.dprob1prob2 * dprob1.deta * dprob0.deta)
c(w) * wz
diff --git a/R/family.rcam.R b/R/family.rcam.R
index bbac38c..ac7ea60 100644
--- a/R/family.rcam.R
+++ b/R/family.rcam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -15,28 +15,36 @@
+
rcam <- function(y,
family = poissonff,
Rank = 0,
Musual = NULL,
weights = NULL,
+ which.lp = 1,
Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
rprefix = "Row.",
cprefix = "Col.",
+ offset = 0,
szero = if (!Rank) NULL else
{ if (Musual == 1) 1 else
setdiff(1:(Musual*ncol(y)),
c( # 1:Musual,
1 + (1:ncol(y)) * Musual,
Index.corner))},
- summary.arg = FALSE, h.step = 0.0001,
- rbaseline = 1, cbaseline = 1, ...) {
+ summary.arg = FALSE, h.step = 0.0001,
+ rbaseline = 1, cbaseline = 1,
+ ...) {
+ noroweffects = FALSE
+ nocoleffects = FALSE
+ if (!is.Numeric(which.lp, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'which.lp'")
if (!is.character(rprefix))
stop("argument 'rprefix' must be character")
@@ -44,11 +52,11 @@
stop("argument 'cprefix' must be character")
if (is.character(family))
- family <- get(family)
+ family <- get(family)
if (is.function(family))
- family <- ((family)())
+ family <- ((family)())
if (!inherits(family, "vglmff")) {
- stop("'family = ", family, "' is not a VGAM family function")
+ stop("'family = ", family, "' is not a VGAM family function")
}
efamily = family
@@ -79,8 +87,11 @@
eifun <- function(i, n) diag(n)[, i, drop = FALSE]
- .rcam.df <- data.frame("Row.2" = eifun(2, nrow(y)))
- colnames( .rcam.df )<- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
+ .rcam.df <-
+ if (!noroweffects) data.frame("Row.2" = eifun(2, nrow(y))) else
+ if (!nocoleffects) data.frame("Col.2" = eifun(2, nrow(y))) else
+ stop("at least one of 'noroweffects' and 'nocoleffects' must be FALSE")
+ colnames( .rcam.df ) <- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
@@ -89,7 +100,7 @@
warn.save = options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
if (any(!is.na(as.numeric(substring(yn1, 1, 1)))))
- yn1 <- paste("X2.", 1:nrow(y), sep = "")
+ yn1 <- paste("X2.", 1:nrow(y), sep = "")
options(warn = warn.save)
@@ -115,6 +126,7 @@
Hlist <- list("(Intercept)" = matrix(1, ncol(y), 1))
+ if (!noroweffects)
for(ii in 2:nrow(y)) {
Hlist[[ paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
@@ -123,6 +135,7 @@
}
+ if (!nocoleffects)
for(ii in 2:ncol(y)) {
@@ -139,9 +152,9 @@
dimnames(.rcam.df) <- list(if (length(dimnames(y)[[1]]))
- dimnames(y)[[1]] else
- as.character(1:nrow(y)),
- dimnames(.rcam.df)[[2]])
+ dimnames(y)[[1]] else
+ as.character(1:nrow(y)),
+ dimnames(.rcam.df)[[2]])
str1 <- paste("~ ", rprefix, "2", sep = "")
@@ -165,7 +178,7 @@
controlfun <- if (Rank == 0) rrvglm.control else rrvglm.control
- controlfun <- if (Rank == 0) vglm.control else rrvglm.control # orig.
+ controlfun <- if (Rank == 0) vglm.control else rrvglm.control # orig.
mycontrol <- controlfun(Rank = Rank,
@@ -187,7 +200,7 @@
if (Rank > 0)
mycontrol$Norrr <- as.formula(str1) # Overwrite this
- assign(".rcam.df", .rcam.df, envir = VGAM:::VGAMenv)
+ assign(".rcam.df", .rcam.df, envir = VGAM::VGAMenv)
warn.save <- options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
@@ -198,11 +211,22 @@
if (Musual > 1) {
orig.Hlist <- Hlist
- for (ii in 1:length(Hlist))
- Hlist[[ii]] <- kronecker(Hlist[[ii]], rbind(1, 0))
+ kmat1 = rbind(1, 0)
+ kmat0 = rbind(0, 1)
+
+ kmat1 = matrix(0, nrow = Musual, ncol = 1)
+ kmat1[which.lp, 1] = 1
+ kmat0 = matrix(1, nrow = Musual, ncol = 1)
+ kmat0[which.lp, 1] = 0
+
+ for (ii in 1:length(Hlist)) {
+ Hlist[[ii]] <- kronecker(Hlist[[ii]],
+ kmat1)
+ }
Hlist[["(Intercept)"]] <-
cbind(Hlist[["(Intercept)"]],
- kronecker(matrix(1, nrow(orig.Hlist[[1]]), 1), rbind(0, 1)))
+ kronecker(matrix(1, nrow(orig.Hlist[[1]]), 1),
+ kmat0))
@@ -213,21 +237,24 @@
-
+ offset.matrix = matrix(offset, nrow = nrow(y),
+ ncol = ncol(y) * Musual) # byrow = TRUE
answer <- if (Rank > 0) {
- if (is(object.save, "rrvglm")) object.save else
+ if (is(object.save, "rrvglm")) object.save else
rrvglm(as.formula(str2),
family = family,
constraints = Hlist,
+ offset = offset.matrix,
weights = if (length(weights)) weights else rep(1, length = nrow(y)),
...,
control = mycontrol, data = .rcam.df)
} else {
- if (is(object.save, "vglm")) object.save else
+ if (is(object.save, "vglm")) object.save else
vglm(as.formula(str2),
family = family,
constraints = Hlist,
+ offset = offset.matrix,
weights = if (length(weights)) weights else rep(1, length = nrow(y)),
...,
control = mycontrol, data = .rcam.df)
@@ -242,13 +269,15 @@
} else {
summary(answer)
}
- } else {
+ } else {
as(answer, ifelse(Rank > 0, "rcam", "rcam0"))
}
answer@misc$rbaseline <- rbaseline
answer@misc$cbaseline <- cbaseline
+ answer@misc$which.lp <- which.lp
+ answer@misc$offset <- offset.matrix
answer
}
@@ -402,6 +431,10 @@ setMethod("summary", "rcam",
orig.raxisl <- rownames(object@y)
orig.caxisl <- colnames(object@y)
+ if (is.null(orig.raxisl))
+ orig.raxisl = as.character(1:nrow(object@y))
+ if (is.null(orig.caxisl))
+ orig.caxisl = as.character(1:ncol(object@y))
roweff.orig <-
roweff <- orig.roweff[c(rfirst:last.r,
@@ -430,7 +463,7 @@ setMethod("summary", "rcam",
axis(1, at = 1:length(raxisl),
cex.lab = rcex.lab,
cex.axis = rcex.axis,
- label = raxisl)
+ labels = raxisl)
axis(2, cex.lab = rcex.lab, ...) # las = rlas)
if (hline0)
@@ -446,7 +479,7 @@ setMethod("summary", "rcam",
axis(1, at = 1:length(caxisl),
cex.lab = ccex.lab,
cex.axis = ccex.axis,
- label = caxisl)
+ labels = caxisl)
axis(2, cex.lab = ccex.lab, ...) # las = clas)
if (hline0)
@@ -518,8 +551,8 @@ moffset <- function (mat, roffset = 0, coffset = 0, postfix = "") {
"else character and match the ",
"column names of the response")
- if (!is.Numeric(ind1, positive = TRUE, integ = TRUE, allow = 1) ||
- !is.Numeric(ind2, positive = TRUE, integ = TRUE, allow = 1))
+ if (!is.Numeric(ind1, positive = TRUE, integer.valued = TRUE, allowable.length = 1) ||
+ !is.Numeric(ind2, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
stop("bad input for arguments 'roffset' and/or 'coffset'")
if (ind1 > nrow(mat))
stop("too large a value for argument 'roffset'")
@@ -663,6 +696,104 @@ confint_nb1 <- function(nb1, level = 0.95) {
+plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
+ se.eachway = c(5, 5), # == c(LHS, RHS),
+ trace.arg = TRUE, ...) {
+
+
+
+
+
+ if (class(rrvglm2) != "rrvglm")
+ stop("argument 'rrvglm2' does not appear to be a rrvglm() object")
+
+ if (rrvglm2@control$Rank != 1)
+ stop("argument 'rrvglm2' is not Rank-1")
+
+ if (rrvglm2@misc$M != 2)
+ stop("argument 'rrvglm2' does not have M = 2")
+
+
+ loglik.orig <- logLik(rrvglm2)
+ temp1 <- confint_rrnb(rrvglm2) # zz
+
+ a21.hat <- (Coef(rrvglm2)@A)[2, 1]
+ se.a21.hat <- temp1$se.a21.hat
+
+
+ se.a21.hat <- sqrt(vcov(rrvglm2)["I(lv.mat)", "I(lv.mat)"])
+
+
+ big.ci.a21 <- a21.hat + c(-1, 1) * se.eachway * se.a21.hat
+ seq.a21 <- seq(big.ci.a21[1], big.ci.a21[2], length = nseq.a21)
+ Hlist.orig <- constraints.vlm(rrvglm2, type = "lm")
+
+
+ alreadyComputed <- !is.null(rrvglm2@post$a21.matrix)
+
+
+ a21.matrix <- if (alreadyComputed) rrvglm2@post$a21.matrix else
+ cbind(a21 = seq.a21, loglikelihood = 0)
+ prev.etastart <- predict(rrvglm2) # Halves the computing time
+ funname <- "vglm"
+ listcall <- as.list(rrvglm2@call)
+
+
+ if (!alreadyComputed)
+ for (ii in 1:nseq.a21) {
+ if (trace.arg)
+ print(ii)
+
+ argslist <- vector("list", length(listcall) - 1)
+ for (kay in 2:(length(listcall)))
+ argslist[[kay - 1]] <- listcall[[kay]]
+
+ names(argslist) <- c(names(listcall)[-1])
+
+ argslist$trace = trace.arg
+ argslist$etastart = prev.etastart
+ argslist$constraints = Hlist.orig
+
+
+ for (kay in 2:length(argslist[["constraints"]])) {
+ argslist[["constraints"]][[kay]] <- rbind(1, a21.matrix[ii, 1])
+ }
+
+
+ fitnew = do.call(what = funname, args = argslist)
+
+ a21.matrix[ii, 2] <- logLik(fitnew)
+
+ prev.etastart <- predict(fitnew)
+ }
+
+
+
+ if (plot.it) {
+ plot(a21.matrix[ ,1], a21.matrix[ ,2], type = "l",
+ col = "blue",
+ xlab = expression(a[21]), ylab = "Log-likelihood") # ...
+
+ abline(v = (Hlist.orig[[length(Hlist.orig)]])[2, 1],
+ col = "darkorange", lty = "dashed")
+
+ abline(h = loglik.orig,
+ col = "darkorange", lty = "dashed")
+
+ abline(h = loglik.orig - qchisq(0.95, df = 1),
+ col = "darkorange", lty = "dashed")
+
+ abline(v = a21.hat + c(-1, 1) * 1.96 * se.a21.hat,
+ col = "gray50", lty = "dashed", lwd = 2.0)
+
+ } # End of (plot.it)
+
+ rrvglm2@post <- list(a21.matrix = a21.matrix)
+ invisible(rrvglm2)
+}
+
+
+
@@ -971,7 +1102,7 @@ summary.qvar <- function(object, ...) {
regularVar = c(object@extra$attributes.y$regularVar)
QuasiVar <- exp(diag(fitted(object))) / 2
- QuasiSE <- sqrt(quasiVar)
+ QuasiSE <- sqrt(QuasiVar)
structure(list(estimate = estimates,
@@ -1067,11 +1198,11 @@ plotqvar <- function(object,
QuasiVar <- exp(diag(fitted(object))) / 2
- QuasiSE <- sqrt(quasiVar)
+ QuasiSE <- sqrt(QuasiVar)
if (!is.numeric(estimates))
stop("Cannot plot, because there are no 'proper' parameter estimates")
- if (!is.numeric(quasiSE))
+ if (!is.numeric(QuasiSE))
stop("Cannot plot, because there are no quasi standard errors")
@@ -1090,7 +1221,7 @@ plotqvar <- function(object,
zedd = abs(qnorm((1 - conf.level) / 2))
lsd.tops <- estimates + zedd * QuasiSE / sqrt(2)
lsd.tails <- estimates - zedd * QuasiSE / sqrt(2)
- if (max(quasiSE) / min(quasiSE) > warn.ratio)
+ if (max(QuasiSE) / min(QuasiSE) > warn.ratio)
warning("Quasi SEs appear to be quite different... the ",
"LSD intervals may not be very accurate")
} else {
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index 6e32437..f370d54 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,44 +28,53 @@ rcqo <- function(n, p, S,
rhox = 0.5,
breaks = 4, # ignored unless family="ordinal"
seed = NULL,
- Crow1positive=TRUE,
+ Crow1positive = TRUE,
xmat = NULL, # Can be input
scalelv = TRUE
) {
family = match.arg(family, c("poisson","negbinomial", "binomial-poisson",
"Binomial-negbinomial", "ordinal-poisson",
"Ordinal-negbinomial","gamma2"))[1]
- if (!is.Numeric(n, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 1 + Rank)
- stop("bad input for argument 'p'")
- if (!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument 'S'")
- if (!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1) || Rank > 4)
- stop("bad input for argument 'Rank'")
- if (!is.Numeric(Kvector, posit=TRUE))
- stop("bad input for argument 'Kvector'")
+
+ if (!is.Numeric(n, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(p, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1) ||
+ p < 1 + Rank)
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(S, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'S'")
+ if (!is.Numeric(Rank, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1) ||
+ Rank > 4)
+ stop("bad input for argument 'Rank'")
+ if (!is.Numeric(Kvector, positive = TRUE))
+ stop("bad input for argument 'Kvector'")
if (!is.Numeric(rhox) || abs(rhox) >= 1)
- stop("bad input for argument 'rhox'")
- if (length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'seed'")
- if (!is.logical(EqualTolerances) || length(EqualTolerances)>1)
- stop("bad input for argument 'EqualTolerances)'")
+ stop("bad input for argument 'rhox'")
+ if (length(seed) &&
+ !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'seed'")
+ if (!is.logical(EqualTolerances) ||
+ length(EqualTolerances) > 1)
+ stop("bad input for argument 'EqualTolerances)'")
if (!is.logical(sqrt) || length(sqrt)>1)
- stop("bad input for argument 'sqrt)'")
+ stop("bad input for argument 'sqrt)'")
if (family != "negbinomial" && sqrt)
warning("argument 'sqrt' is used only with family='negbinomial'")
- if (!EqualTolerances && !is.Numeric(sdTolerances, posit=TRUE))
+ if (!EqualTolerances && !is.Numeric(sdTolerances, positive = TRUE))
stop("bad input for argument 'sdTolerances'")
- if (!is.Numeric(loabundance, posit=TRUE))
+ if (!is.Numeric(loabundance, positive = TRUE))
stop("bad input for argument 'loabundance'")
- if (!is.Numeric(sdlv, posit=TRUE))
+ if (!is.Numeric(sdlv, positive = TRUE))
stop("bad input for argument 'sdlv'")
- if (!is.Numeric(sdOptima, posit=TRUE))
+ if (!is.Numeric(sdOptima, positive = TRUE))
stop("bad input for argument 'sdOptima'")
if (EqualMaxima && loabundance != hiabundance)
stop("arguments 'loabundance' and 'hiabundance' must ",
- "be equal when 'EqualTolerances=TRUE'")
+ "be equal when 'EqualTolerances = TRUE'")
if (any(loabundance > hiabundance))
stop("loabundance > hiabundance is not allowed")
if (!is.logical(Crow1positive)) {
@@ -108,7 +117,7 @@ rcqo <- function(n, p, S,
} else {
eval(change.seed.expression)
xmat = matrix(rnorm(n*(p-1)), n, p-1) %*% L
- xmat = scale(xmat, center=TRUE)
+ xmat = scale(xmat, center = TRUE)
xnames = paste("x", 2:p, sep="")
dimnames(xmat) = list(as.character(1:n), xnames)
}
@@ -141,7 +150,8 @@ rcqo <- function(n, p, S,
}
}
if (ESOptima) {
- if (!is.Numeric(S^(1/Rank), integ=TRUE) || S^(1/Rank) < 2)
+ if (!is.Numeric(S^(1/Rank), integer.valued = TRUE) ||
+ S^(1/Rank) < 2)
stop("S^(1/Rank) must be an integer greater or equal to 2")
if (Rank == 1) {
optima = matrix(as.numeric(NA), S, Rank)
@@ -197,10 +207,10 @@ rcqo <- function(n, p, S,
eval(change.seed.expression)
logmaxima = runif(S, min=loeta, max=hieta) # loeta and hieta may be vector
names(logmaxima) = ynames
- etamat = matrix(logmaxima,n,S,byrow=TRUE) # eta=log(mu) only; intercept term
+ etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
for(jay in 1:S) {
- optmat = matrix(optima[jay,], nrow=n, ncol=Rank, byrow=TRUE)
- tolmat = matrix(Tols[jay,], nrow=n, ncol=Rank, byrow=TRUE)
+ optmat = matrix(optima[jay,], nrow=n, ncol=Rank, byrow = TRUE)
+ tolmat = matrix(Tols[jay,], nrow=n, ncol=Rank, byrow = TRUE)
temp = cbind((lvmat - optmat) / tolmat)
for(r in 1:Rank)
etamat[,jay]=etamat[,jay]-0.5*(lvmat[,r] - optmat[jay,r])*temp[,r]
@@ -212,13 +222,13 @@ rcqo <- function(n, p, S,
"gamma2"=3)
eval(change.seed.expression)
if (rootdist == 1) {
- ymat = matrix(rpois(n*S, lam=exp(etamat)), n, S)
+ ymat = matrix(rpois(n*S, lambda = exp(etamat)), n, S)
} else if (rootdist == 2) {
- mKvector = matrix(Kvector, n, S, byrow=TRUE)
+ mKvector = matrix(Kvector, n, S, byrow = TRUE)
ymat = matrix(rnbinom(n=n*S, mu=exp(etamat), size=mKvector),n,S)
if (sqrt) ymat = ymat^0.5
} else if (rootdist == 3) {
- Shape = matrix(Shape, n, S, byrow=TRUE)
+ Shape = matrix(Shape, n, S, byrow = TRUE)
ymat = matrix(rgamma(n*S, shape=Shape, scale=exp(etamat)/Shape),n,S)
if (Log) ymat = log(ymat)
} else stop("argument 'rootdist' unmatched")
@@ -291,19 +301,24 @@ dcqo <- function(x, p, S,
family = as.character(substitute(family))
family = match.arg(family, c("poisson", "binomial",
"negbinomial", "ordinal"))[1]
- if (!is.Numeric(p, integer=TRUE, posit=TRUE, allow=1) || p < 2)
- stop("bad input for argument 'p'")
- if (!is.Numeric(S, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument 'S'")
- if (!is.Numeric(Rank, integer=TRUE, posit=TRUE, allow=1))
- stop("bad input for argument 'Rank'")
- if (length(seed) && !is.Numeric(seed, integer=TRUE, posit=TRUE))
- stop("bad input for argument 'seed'")
+ if (!is.Numeric(p, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1) ||
+ p < 2)
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(S, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'S'")
+ if (!is.Numeric(Rank, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'Rank'")
+ if (length(seed) &&
+ !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'seed'")
if (!is.logical(EqualTolerances) || length(EqualTolerances)>1)
- stop("bad input for argument 'EqualTolerances)'")
+ stop("bad input for argument 'EqualTolerances)'")
if (EqualMaxima && loabundance != hiabundance)
- stop("'loabundance' and 'hiabundance' must ",
- "be equal when 'EqualTolerances=TRUE'")
+ stop("'loabundance' and 'hiabundance' must ",
+ "be equal when 'EqualTolerances = TRUE'")
if (length(seed)) set.seed(seed)
xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
@@ -317,10 +332,10 @@ dcqo <- function(x, p, S,
hieta = log(hiabundance)
logmaxima = runif(S, min=loeta, max=hieta)
- etamat = matrix(logmaxima,n,S,byrow=TRUE) # eta=log(mu) only; intercept term
+ etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
for(jay in 1:S) {
- optmat = matrix(optima[jay,], n, Rank, byrow=TRUE)
- tolmat = matrix(Tols[jay,], n, Rank, byrow=TRUE)
+ optmat = matrix(optima[jay,], n, Rank, byrow = TRUE)
+ tolmat = matrix(Tols[jay,], n, Rank, byrow = TRUE)
temp = cbind((lvmat - optmat) * tolmat)
for(r in 1:Rank)
etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] *
@@ -332,7 +347,7 @@ dcqo <- function(x, p, S,
} else {
- matrix(rpois(n*S, lam=exp(etamat)), n, S)
+ matrix(rpois(n*S, lambda = exp(etamat)), n, S)
}
if (family == "binomial")
ymat = 0 + (ymat > 0)
diff --git a/R/family.robust.R b/R/family.robust.R
index b17eeab..a01fbf2 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,7 +13,8 @@
edhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
rm(log)
zedd <- (x - mu) / sigma
@@ -44,7 +45,8 @@ dhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE)
rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ=TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
myl <- rep(0.0, len = use.n)
@@ -87,17 +89,19 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
qhuber <- function (p, k = 0.862, mu = 0, sigma = 1)
{
- if(min(sigma) <= 0) stop("'sigma' must be positive")
- if(min(k) <= 0) stop("'k' must be positive")
-
- cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
- x <- pmin(p, 1 - p)
- q <- ifelse(x <= sqrt(2 * pi) * dnorm(k) / ( k * cnorm),
- log(k * cnorm * x) / k - k / 2,
- qnorm(abs(1 - pnorm(k) + x * cnorm / sqrt(2 * pi) -
- dnorm(k) / k)))
- ifelse(p < 0.5, mu + q * sigma,
- mu - q * sigma)
+ if(min(sigma) <= 0)
+ stop("argument 'sigma' must be positive")
+ if(min(k) <= 0)
+ stop("argument 'k' must be positive")
+
+ cnorm <- sqrt(2 * pi) * ((2 * pnorm(k) - 1) + 2 * dnorm(k) / k)
+ x <- pmin(p, 1 - p)
+ q <- ifelse(x <= sqrt(2 * pi) * dnorm(k) / ( k * cnorm),
+ log(k * cnorm * x) / k - k / 2,
+ qnorm(abs(1 - pnorm(k) + x * cnorm / sqrt(2 * pi) -
+ dnorm(k) / k)))
+ ifelse(p < 0.5, mu + q * sigma,
+ mu - q * sigma)
}
@@ -105,7 +109,8 @@ qhuber <- function (p, k = 0.862, mu = 0, sigma = 1)
phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
{
- if (any(sigma <= 0)) stop("sigma must be positive")
+ if (any(sigma <= 0))
+ stop("argument 'sigma' must be positive")
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
@@ -130,131 +135,133 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1 or 2 or 3 or 4")
- if (!is.Numeric(k, allow = 1, posit = TRUE))
- stop("bad input for argument 'k'")
+ if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'k'")
if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
+ llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
+ lscale = as.character(substitute(lscale))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
new("vglmff",
- blurb = c("Huber least favorable distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: location"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
- location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
- if ( .imethod == 3) {
- rep(weighted.mean(y, w), len = n)
- } else if ( .imethod == 2) {
- rep(median(rep(y, w)), len = n)
- } else if ( .imethod == 1) {
- junk$fitted
- } else {
- y
- }
- }
- etastart <- cbind(
- theta2eta(location.init, .llocat, earg = .elocat),
- theta2eta(scale.y.est, .lscale, earg = .escale))
+ blurb = c("Huber least favorable distribution\n\n",
+ "Links: ",
+ namesof("location", llocation, earg = elocation), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: location"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y = y, w = w)
+ scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
+ if ( .imethod == 3) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .imethod == 2) {
+ rep(median(rep(y, w)), len = n)
+ } else if ( .imethod == 1) {
+ junk$fitted
+ } else {
+ y
+ }
}
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocat, earg = .elocat)
- }, list( .llocat = llocation,
- .elocat = elocation, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link <- c("location" = .llocat, "scale" = .lscale)
- misc$earg <- list("location" = .elocat, "scale" = .escale)
- misc$expected <- TRUE
- misc$k.huber <- .k
- misc$imethod <- .imethod
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .k = k, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- kay <- .k
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dhuber(y, k = kay, mu = location, sigma = myscale,
- log = TRUE))
- }
- }, list( .llocat = llocation, .lscale = lscale,
+ etastart <- cbind(
+ theta2eta(location.init, .llocat, earg = .elocat),
+ theta2eta(scale.y.est, .lscale, earg = .escale))
+ }
+ }), list( .llocat = llocation, .lscale = lscale,
.elocat = elocation, .escale = escale,
- .k = k ))),
- vfamily = c("huber"),
- deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- myk <- .k
-
- zedd <- (y - mylocat) / myscale
- cond2 <- (abs(zedd) <= myk)
- cond3 <- (zedd > myk)
-
- dl.dlocat <- -myk + 0 * zedd # cond1
- dl.dlocat[cond2] <- zedd[cond2]
- dl.dlocat[cond3] <- myk # myk is a scalar
- dl.dlocat <- dl.dlocat / myscale
-
-
- dl.dscale <- (-myk * zedd)
- dl.dscale[cond2] <- (zedd^2)[cond2]
- dl.dscale[cond3] <- ( myk * zedd)[cond3]
- dl.dscale <- (-1 + dl.dscale) / myscale
-
- dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
- dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
- ans <-
- c(w) * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)
- ans
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .eps = eps, .k = k ))),
- weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[,1], .llocat, earg = .elocat)
+ }, list( .llocat = llocation,
+ .elocat = elocation, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$earg <- list("location" = .elocat, "scale" = .escale)
+ misc$expected <- TRUE
+ misc$k.huber <- .k
+ misc$imethod <- .imethod
+ }), list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .k = k, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location <- eta2theta(eta[,1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ kay <- .k
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dhuber(y, k = kay, mu = location, sigma = myscale,
+ log = TRUE))
+ }
+ }, list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .k = k ))),
+ vfamily = c("huber"),
+ deriv = eval(substitute(expression({
+ mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ myk <- .k
+
+ zedd <- (y - mylocat) / myscale
+ cond2 <- (abs(zedd) <= myk)
+ cond3 <- (zedd > myk)
+
+ dl.dlocat <- -myk + 0 * zedd # cond1
+ dl.dlocat[cond2] <- zedd[cond2]
+ dl.dlocat[cond3] <- myk # myk is a scalar
+ dl.dlocat <- dl.dlocat / myscale
+
+
+ dl.dscale <- (-myk * zedd)
+ dl.dscale[cond2] <- (zedd^2)[cond2]
+ dl.dscale[cond3] <- ( myk * zedd)[cond3]
+ dl.dscale <- (-1 + dl.dscale) / myscale
+
+ dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+ dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
+ ans <-
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ ans
+ }), list( .llocat = llocation, .lscale = lscale,
+ .elocat = elocation, .escale = escale,
+ .eps = eps, .k = k ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
- temp4 <- erf(myk / sqrt(2))
- ed2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
+ temp4 <- erf(myk / sqrt(2))
+ ed2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
- ed2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
- 2 * (1 - .eps) / (myk * myscale^2)
+ ed2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
+ 2 * (1 - .eps) / (myk * myscale^2)
- wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
- ans
- c(w) * wz
- }), list( .eps = eps ))))
+ wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
+ ans
+ c(w) * wz
+ }), list( .eps = eps ))))
}
@@ -266,15 +273,14 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
imethod = 1) {
- print("hi 20110802")
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1 or 2 or 3 or 4")
- if (!is.Numeric(k, allow = 1, posit = TRUE))
+ if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'k'")
if (mode(llocation) != "character" && mode(llocation) != "name")
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 24b8961..263cb13 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -15,7 +15,7 @@ replace.constraints = function(Blist, cm, index) {
valt.control <- function(
- Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
+ Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
Criterion = c("rss", "coefficients"),
Linesearch = FALSE, Maxit = 7,
@@ -27,12 +27,12 @@ replace.constraints = function(Blist, cm, index) {
Criterion <- as.character(substitute(Criterion))
Criterion <- match.arg(Criterion, c("rss", "coefficients"))[1]
- list(Alphavec=Alphavec,
+ list(Alphavec = Alphavec,
Criterion = Criterion,
- Linesearch=Linesearch,
- Maxit=Maxit,
- Suppress.warning=Suppress.warning,
- Tolerance=Tolerance)
+ Linesearch = Linesearch,
+ Maxit = Maxit,
+ Suppress.warning = Suppress.warning,
+ Tolerance = Tolerance)
}
@@ -41,7 +41,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
Rank = ncol(numat)
moff = NULL
ans = if (Quadratic) {
- index = iam(NA, NA, M=Rank, diagonal = TRUE, both = TRUE)
+ index = iam(NA, NA, M = Rank, diag = TRUE, both = TRUE)
temp1 = cbind(numat[,index$row] * numat[,index$col])
if (ITolerances) {
moff = 0
@@ -62,15 +62,15 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
valt <- function(x, z, U, Rank = 1,
Blist = NULL,
Cinit = NULL,
- Alphavec=c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
+ Alphavec = c(2, 4, 6, 9, 12, 16, 20, 25, 30, 40, 50,
60, 80, 100, 125, 2^(8:12)),
- Criterion=c("rss", "coefficients"),
- Crow1positive = rep(TRUE, len = Rank),
+ Criterion = c("rss", "coefficients"),
+ Crow1positive = rep(TRUE, length.out = Rank),
colx1.index,
Linesearch = FALSE,
- Maxit=20,
+ Maxit = 20,
szero = NULL,
- SD.Cinit=0.02,
+ SD.Cinit = 0.02,
Suppress.warning = FALSE,
Tolerance = 1e-6,
trace = FALSE,
@@ -155,7 +155,7 @@ qrrvglm.xprod = function(numat, Aoffset, Quadratic, ITolerances) {
evnu$vector %*% evnu$value^(-0.5)
C = C %*% temp7
A = A %*% t(solve(temp7))
- temp8 = crow1C(cmat=C, Crow1positive, amat=A)
+ temp8 = crow1C(cmat = C, Crow1positive, amat = A)
C = temp8$cmat
A = temp8$amat
@@ -307,8 +307,8 @@ valt.2iter <- function(x, z, U, Blist, A, control) {
clist1 = replace.constraints(Blist, A, control$colx2.index)
- fit <- vlm.wfit(xmat=x, z, Blist=clist1, U=U, matrix.out = TRUE,
- is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij=control$xij)
+ fit <- vlm.wfit(xmat=x, z, Blist=clist1, U = U, matrix.out = TRUE,
+ is.vlmX = FALSE, rss = TRUE, qr = FALSE, xij = control$xij)
C = fit$mat.coef[control$colx2.index,,drop = FALSE] %*% A %*% solve(t(A) %*% A)
list(A=A, C=C, fitted=fit$fitted, new.coeffs = fit$coef,
@@ -342,24 +342,24 @@ valt.1iter = function(x, z, U, Blist, C, control, lp.names = NULL, nice31 = FALS
clist2 = NULL # for vlm.wfit
- i5 = rep(0, len = MSratio)
+ i5 = rep(0, length.out = MSratio)
for(ii in 1:NOS) {
i5 = i5 + 1:MSratio
tmp100 = vlm.wfit(xmat=new.lv.model.matrix, zedd[,i5,drop = FALSE],
- Blist=clist2, U=U[i5,,drop = FALSE],
+ Blist=clist2, U = U[i5,,drop = FALSE],
matrix.out = TRUE, is.vlmX = FALSE, rss = TRUE,
qr = FALSE, Eta.range = control$Eta.range,
- xij=control$xij, lp.names=lp.names[i5])
+ xij = control$xij, lp.names=lp.names[i5])
fit$rss = fit$rss + tmp100$rss
fit$mat.coef = cbind(fit$mat.coef, tmp100$mat.coef)
fit$fitted.values = cbind(fit$fitted.values, tmp100$fitted.values)
}
} else {
- fit = vlm.wfit(xmat=new.lv.model.matrix, zedd, Blist=clist2, U=U,
+ fit = vlm.wfit(xmat=new.lv.model.matrix, zedd, Blist=clist2, U = U,
matrix.out = TRUE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
Eta.range = control$Eta.range,
- xij=control$xij, lp.names=lp.names)
+ xij = control$xij, lp.names=lp.names)
}
A = if (tmp833$NoA) matrix(0, M, Rank) else
t(fit$mat.coef[1:Rank,,drop = FALSE])
@@ -412,9 +412,9 @@ rrr.init.expression <- expression({
M = 2 * ifelse(is.matrix(y), ncol(y), 1)
control$szero =
- rrcontrol$szero = seq(from=2, to=M, by=2) # Handles A
+ rrcontrol$szero = seq(from = 2, to=M, by = 2) # Handles A
control$Dzero =
- rrcontrol$Dzero = seq(from=2, to=M, by=2) # Handles D
+ rrcontrol$Dzero = seq(from = 2, to=M, by = 2) # Handles D
}
@@ -553,9 +553,9 @@ rrr.end.expression = expression({
C = Cmat, control=control)
lv.mat = tmp300$lv.mat # Needed at the top of new.s.call
- lm2vlm.model.matrix(tmp300$new.lv.model.matrix,B.list,xij=control$xij)
+ lm2vlm.model.matrix(tmp300$new.lv.model.matrix,B.list,xij = control$xij)
} else {
- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ lm2vlm.model.matrix(x, Blist, xij = control$xij)
}
@@ -572,10 +572,11 @@ rrr.end.expression = expression({
deriv.mu <- eval(family at deriv)
wz <- eval(family at weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace = trace, wzeps=control$wzepsilon)
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset # Contains \bI \bnu
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+ U <- vchol(wz, M = M, n = n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset # Contains \bI \bnu
@@ -643,12 +644,12 @@ rrr.derivative.expression <- expression({
gr = if (control$GradientFunction) calldcqo else NULL,
method=which.optimizer,
control=list(fnscale = 1,trace=as.integer(control$trace),
- parscale=rep(control$Parscale, len = length(Cmat)),
- maxit=250),
+ parscale=rep(control$Parscale, length.out = length(Cmat)),
+ maxit = 250),
etamat=eta, xmat=x, ymat=y, wvec=w,
X_vlm_1save = if (nice31) NULL else X_vlm_1save,
modelno=modelno, Control=control,
- n=n, M=M, p1star=p1star, p2star=p2star, nice31=nice31)
+ n = n, M = M, p1star=p1star, p2star=p2star, nice31=nice31)
if (zthere <- exists(".VGAM.z", envir = VGAM:::VGAMenv)) {
@@ -675,8 +676,8 @@ rrr.derivative.expression <- expression({
maxit=rrcontrol$Maxit,
abstol=rrcontrol$Abstol,
reltol=use.reltol),
- U=U, z= if (control$ITolerances) z+offset else z,
- M=M, xmat=x, # varbix2=varbix2,
+ U = U, z= if (control$ITolerances) z+offset else z,
+ M = M, xmat=x, # varbix2=varbix2,
Blist = Blist, rrcontrol = rrcontrol)
}
@@ -695,7 +696,7 @@ rrr.derivative.expression <- expression({
}
- alt = valt.1iter(x=x, z=z, U=U, Blist = Blist, C = Cmat, nice31=nice31,
+ alt = valt.1iter(x=x, z=z, U = U, Blist = Blist, C = Cmat, nice31=nice31,
control = rrcontrol, lp.names=predictors.names)
@@ -774,10 +775,10 @@ rrr.derivC.rss = function(theta, U, z, M, xmat, Blist, rrcontrol,
vlm.wfit(xmat=tmp700$new.lv.model.matrix, zmat=z,
- Blist = Blist, ncolx=ncol(xmat), U=U, only.rss = TRUE,
+ Blist = Blist, ncolx=ncol(xmat), U = U, only.rss = TRUE,
matrix.out = FALSE, is.vlmX = FALSE, rss= TRUE, qr = FALSE,
Eta.range = rrcontrol$Eta.range,
- xij=rrcontrol$xij)$rss
+ xij = rrcontrol$xij)$rss
}
@@ -832,7 +833,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
stop("'varlvI' must be TRUE or FALSE")
if (length(reference) > 1) stop("'reference' must be of length 0 or 1")
if (length(reference) && is.Numeric(reference))
- if (!is.Numeric(reference, allow = 1, integ = TRUE))
+ if (!is.Numeric(reference, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'reference'")
if (!is.logical(ConstrainedQO <- object at control$ConstrainedQO))
stop("cannot determine whether the model is constrained or not")
@@ -923,7 +924,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Amat = Amat %*% Mmat
}
if (length(Cmat)) {
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
Cmat = temp800$cmat
Amat = temp800$amat
}
@@ -946,7 +947,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Mmat = solve(t(evnu$vector))
Cmat = Cmat %*% evnu$vector # == Cmat %*% solve(t(Mmat))
Amat = Amat %*% Mmat
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
Cmat = temp800$cmat
Amat = temp800$amat
eval(adjust.Dmat.expression)
@@ -962,7 +963,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Mmat = if (Rank > 1) diag(sdnumat) else matrix(sdnumat, 1, 1)
Cmat = Cmat %*% solve(t(Mmat))
Amat = Amat %*% Mmat
- temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat=Amat)
+ temp800 = crow1C(Cmat, ocontrol$Crow1positive, amat = Amat)
Cmat = temp800$cmat
Amat = temp800$amat
eval(adjust.Dmat.expression)
@@ -978,10 +979,10 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
Amat[ii,,drop = FALSE] %*% optimum[,ii,drop = FALSE] +
t(optimum[,ii,drop = FALSE]) %*%
Darray[,,ii,drop= TRUE] %*% optimum[,ii,drop = FALSE]
- mymax = object at family@linkinv(rbind(eta.temp), extra=object at extra)
+ mymax = object at family@linkinv(rbind(eta.temp), extra = object at extra)
c(mymax) # Convert from matrix to vector
} else {
- 5 * rep(as.numeric(NA), len = M) # Make "numeric"
+ 5 * rep(as.numeric(NA), length.out = M) # Make "numeric"
}
names(maximum) = ynames
@@ -1048,7 +1049,7 @@ Coef.qrrvglm <- function(object, varlvI = FALSE, reference = NULL, ...) {
setClass(Class = "Coef.rrvglm", representation(
"A" = "matrix",
- "B1" = "matrix", # This may be unassigned if p1=0.
+ "B1" = "matrix", # This may be unassigned if p1 = 0.
"C" = "matrix",
"Rank" = "numeric",
"colx1.index" = "numeric",
@@ -1076,7 +1077,7 @@ setClass(Class = "Coef.qrrvglm", representation(
"C" = "matrix"),
contains = "Coef.uqo")
-printCoef.qrrvglm = function(x, ...) {
+show.Coef.qrrvglm = function(x, ...) {
object = x
Rank = object at Rank
@@ -1085,14 +1086,15 @@ printCoef.qrrvglm = function(x, ...) {
mymat = matrix(as.numeric(NA), NOS, Rank)
if (Rank == 1) { # || object at Diagonal
for(ii in 1:NOS) {
- fred = if (Rank>1) diag(object at Tolerance[,,ii,drop = F]) else
+ fred = if (Rank>1) diag(object at Tolerance[,,ii,drop = FALSE]) else
object at Tolerance[,,ii]
if (all(fred > 0))
mymat[ii,] = sqrt(fred)
}
dimnames(mymat) = list(dimnames(object at Tolerance)[[3]],
if (Rank == 1) "lv" else
- paste("Tolerance", dimnames(mymat)[[2]], sep = ""))
+ paste("Tolerance", dimnames(mymat)[[2]],
+ sep = ""))
} else {
for(ii in 1:NOS) {
fred = eigen(object at Tolerance[,,ii])
@@ -1104,9 +1106,11 @@ printCoef.qrrvglm = function(x, ...) {
}
dimnames(object at A) = list(dimnames(object at A)[[1]],
- if (Rank > 1) paste("A", dimnames(object at A)[[2]], sep = ".") else "A")
+ if (Rank > 1) paste("A", dimnames(object at A)[[2]], sep = ".") else
+ "A")
- Maximum = if (length(object at Maximum)) cbind(Maximum=object at Maximum) else NULL
+ Maximum = if (length(object at Maximum))
+ cbind(Maximum = object at Maximum) else NULL
if (length(Maximum) && length(mymat) && Rank == 1)
Maximum[is.na(mymat),] = NA
@@ -1138,21 +1142,29 @@ printCoef.qrrvglm = function(x, ...) {
}
+
+
+
setMethod("show", "Coef.qrrvglm", function(object)
- printCoef.qrrvglm(object))
-setMethod("print", "Coef.qrrvglm", function(x, ...)
- printCoef.qrrvglm(x, ...))
+ show.Coef.qrrvglm(object))
+
+
+
+
+
+
+
setMethod("summary", "qrrvglm", function(object, ...)
summary.qrrvglm(object, ...))
predictqrrvglm <- function(object,
newdata = NULL,
- type=c("link", "response", "lv", "terms"),
+ type = c("link", "response", "lv", "terms"),
se.fit = FALSE,
- deriv=0,
+ deriv = 0,
dispersion = NULL,
- extra=object at extra,
+ extra = object at extra,
varlvI = FALSE, reference = NULL, ...)
{
if (se.fit)
@@ -1190,7 +1202,7 @@ predictqrrvglm <- function(object,
attr(X, "assign") = attrassignlm(X, tt)
} else {
if (is.smart(object) && length(object at smart.prediction)) {
- setup.smart("read", smart.prediction=object at smart.prediction)
+ setup.smart("read", smart.prediction = object at smart.prediction)
}
tt <- object at terms$terms # terms(object) # 11/8/03; object at terms$terms
@@ -1244,7 +1256,7 @@ predictqrrvglm <- function(object,
thisSpecies = whichSpecies[sppno]
Dmat = matrix(Coefs at D[,,thisSpecies], Rank, Rank)
etamat[,thisSpecies] = etamat[,thisSpecies] +
- mux34(lvmat, Dmat, symm = TRUE)
+ mux34(lvmat, Dmat, symmetric = TRUE)
}
} else {
etamat = object at predictors
@@ -1300,7 +1312,7 @@ setMethod("residuals", "qrrvglm", function(object, ...)
-printrrvglm <- function(x, ...)
+show.rrvglm <- function(x, ...)
{
if (!is.null(cl <- x at call)) {
cat("Call:\n")
@@ -1331,7 +1343,7 @@ printrrvglm <- function(x, ...)
cat("\n")
if (length(deviance(x)))
- cat("Residual Deviance:", format(deviance(x)), "\n")
+ cat("Residual deviance:", format(deviance(x)), "\n")
if (length(vll <- logLik.vlm(x)))
cat("Log-likelihood:", format(vll), "\n")
@@ -1348,9 +1360,9 @@ printrrvglm <- function(x, ...)
-setMethod("print", "rrvglm", function(x, ...) printrrvglm(x, ...))
-setMethod("show", "rrvglm", function(object) printrrvglm(object))
+
+setMethod("show", "rrvglm", function(object) show.rrvglm(object))
@@ -1376,7 +1388,7 @@ summary.rrvglm <- function(object, correlation = FALSE,
- if (!is.Numeric(h.step, allow = 1) || abs(h.step)>1)
+ if (!is.Numeric(h.step, allowable.length = 1) || abs(h.step)>1)
stop("bad input for 'h.step'")
if (!object at control$Corner)
@@ -1435,8 +1447,9 @@ summary.rrvglm <- function(object, correlation = FALSE,
dispersion <- tmp5$rss / answer at df[2] # Estimate
}
- answer at coef3 <- get.rrvglm.se2(answer at cov.unscaled, dispersion=dispersion,
- coef=tmp5$coefficients)
+ answer at coef3 <- get.rrvglm.se2(answer at cov.unscaled,
+ dispersion = dispersion,
+ coefficients = tmp5$coefficients)
answer at dispersion <- dispersion
answer at sigma <- dispersion^0.5
@@ -1478,8 +1491,10 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
Rank <- fit at control$Rank # fit at misc$Nested.Rank
Amat <- fit at constraints[[colx2.index[1]]]
- B1mat =if (p1) coefvlm(fit,mat = TRUE)[colx1.index,,drop = FALSE] else NULL
- C.try <- coefvlm(fit, mat= TRUE)[colx2.index,,drop = FALSE]
+ B1mat =if (p1)
+ coefvlm(fit, matrix.out = TRUE)[colx1.index, , drop = FALSE] else
+ NULL
+ C.try <- coefvlm(fit, matrix.out= TRUE)[colx2.index, , drop = FALSE]
Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
x1mat <- if (p1) fit at x[, colx1.index, drop = FALSE] else NULL
@@ -1495,21 +1510,22 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
zmat <- fit at predictors + fit at residuals
theta <- c(Amat[-c(Index.corner,szero),])
if (fit at control$checkwz)
- wz = checkwz(wz, M=M, trace = trace, wzeps=fit at control$wzepsilon)
- U <- vchol(wz, M=M, n=n, silent= TRUE)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = fit at control$wzepsilon)
+ U <- vchol(wz, M = M, n = n, silent= TRUE)
if (numerical) {
- delct.da <- num.deriv.rrr(fit, M=M, r=Rank,
- x1mat=x1mat, x2mat=x2mat, p2=p2,
+ delct.da <- num.deriv.rrr(fit, M = M, r = Rank,
+ x1mat=x1mat, x2mat=x2mat, p2 = p2,
Index.corner, Aimat=Amat, B1mat=B1mat, Cimat=Cmat,
h.step = h.step, colx2.index=colx2.index,
xij = fit at control$xij,
szero = szero)
} else {
- delct.da <- dctda.fast.only(theta=theta, wz=wz, U=U, zmat,
- M=M, r=Rank, x1mat=x1mat, x2mat=x2mat, p2=p2,
+ delct.da <- dctda.fast.only(theta=theta, wz = wz, U = U, zmat,
+ M = M, r = Rank, x1mat=x1mat, x2mat=x2mat, p2 = p2,
Index.corner, Aimat=Amat, B1mat=B1mat, Cimat=Cmat,
- xij=fit at control$xij,
+ xij = fit at control$xij,
szero = szero)
}
@@ -1588,12 +1604,16 @@ get.rrvglm.se1 = function(fit, omit13 = FALSE, kill.all = FALSE,
}
- fit1122 <- if (dspec) vlm(bb,
- constraint=Blist, crit = "d", weight=wz, data=bbdata,
- save.weight = TRUE, smart = FALSE, trace = trace.arg, x = TRUE) else
- vlm(bb,
- constraint=Blist, crit = "d", weight=wz,
- save.weight = TRUE, smart = FALSE, trace = trace.arg, x = TRUE)
+ fit1122 <- if (dspec)
+ vlm(bb,
+ constraints = Blist, criterion = "d", weights = wz,
+ data = bbdata,
+ save.weight = TRUE, smart = FALSE, trace = trace.arg,
+ x.arg = TRUE) else
+ vlm(bb,
+ constraints = Blist, criterion = "d", weights = wz,
+ save.weight = TRUE, smart = FALSE, trace = trace.arg,
+ x.arg = TRUE)
@@ -1648,7 +1668,7 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
ans <- matrix(coefficients, length(coefficients), 3)
ans[,2] <- sqrt(dispersion) * sqrt(diag(cov.unscaled))
ans[,3] <- ans[,1] / ans[,2]
- dimnames(ans) <- list(d8, c("Value", "Std. Error", "t value"))
+ dimnames(ans) <- list(d8, c("Estimate", "Std. Error", "z value"))
ans
}
@@ -1656,7 +1676,7 @@ get.rrvglm.se2 <- function(cov.unscaled, dispersion = 1, coefficients) {
num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
p2, Index.corner, Aimat, B1mat, Cimat,
- h.step=0.0001, colx2.index,
+ h.step = 0.0001, colx2.index,
xij = NULL, szero = NULL)
{
@@ -1697,9 +1717,9 @@ num.deriv.rrr <- function(fit, M, r, x1mat, x2mat,
wz = fred$weights
deriv.mu <- fred$deriv
- U <- vchol(wz, M=M, n=nn, silent = TRUE)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=nn)
- newzmat <- neweta + vbacksub(U, tvfor, M=M, n=nn) - offset
+ U <- vchol(wz, M = M, n = nn, silent = TRUE)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = nn)
+ newzmat <- neweta + vbacksub(U, tvfor, M = M, n = nn) - offset
if (is.numeric(x1mat))
newzmat = newzmat - x1mat %*% B1mat
@@ -1735,29 +1755,33 @@ dctda.fast.only = function(theta, wz, U, zmat, M, r, x1mat, x2mat,
fred = kronecker(matrix(1,1,r), x2mat)
fred = kronecker(fred, matrix(1,M,1))
barney = kronecker(Aimat, matrix(1,1,p2))
- barney = kronecker(matrix(1,nn,1), barney)
+ barney = kronecker(matrix(1, nn, 1), barney)
temp = array(t(barney*fred), c(p2*r, M, nn))
temp = aperm(temp, c(2,1,3)) # M by p2*r by nn
- temp = mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp = mux5(wz, temp, M = M, matrix.arg= TRUE)
temp = m2adefault(temp, M=p2*r) # Note M != M here!
- G = solve(rowSums(temp, dims=2)) # p2*r by p2*r
+ G = solve(rowSums(temp, dims = 2)) # p2*r by p2*r
dc.da = array(NA, c(p2, r, M, r)) # different from other functions
if (length(Index.corner) == M)
stop("cannot handle full rank models yet")
cbindex = (1:M)[-Index.corner] # complement of Index.corner
resid2 = if (length(x1mat))
- mux22(t(wz), zmat - x1mat %*% B1mat, M=M, upp = FALSE, as.mat = TRUE) else
- mux22(t(wz), zmat , M=M, upp = FALSE, as.mat = TRUE)
+ mux22(t(wz), zmat - x1mat %*% B1mat, M = M,
+ upper = FALSE, as.matrix = TRUE) else
+ mux22(t(wz), zmat , M = M,
+ upper = FALSE, as.matrix = TRUE)
for(sss in 1:r)
for(ttt in cbindex) {
- fred = t(x2mat) * matrix(resid2[,ttt], p2, nn, byrow= TRUE) # p2 * nn
+ fred = t(x2mat) *
+ matrix(resid2[, ttt], p2, nn, byrow = TRUE) # p2 * nn
temp2 = kronecker(ei(sss,r), rowSums(fred))
for(kkk in 1:r) {
- Wiak = mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow= TRUE),
- M=M, upper= FALSE, as.mat= TRUE) # nn * M
+ Wiak = mux22(t(wz), matrix(Aimat[,kkk], nn, M, byrow = TRUE),
+ M = M, upper = FALSE,
+ as.matrix = TRUE) # nn * M
wxx = Wiak[,ttt] * x2mat
blocki = t(x2mat) %*% wxx
temp4a = blocki %*% Cimat[,kkk]
@@ -1802,8 +1826,8 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
Blist[[ii]] = Aimat
}
- coeffs = vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out = TRUE,
- xij=xij)$mat.coef
+ coeffs = vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = TRUE,
+ xij = xij)$mat.coef
c3 <- coeffs <- t(coeffs) # transpose to make M x (pp+1)
@@ -1816,41 +1840,44 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
fred <- kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop = FALSE] else xmat)
fred <- kronecker(fred, matrix(1,M,1))
barney <- kronecker(Aimat, matrix(1,1,pp))
- barney <- kronecker(matrix(1,nn,1), barney)
+ barney <- kronecker(matrix(1, nn, 1), barney)
temp <- array(t(barney*fred), c(r*pp,M,nn))
temp <- aperm(temp, c(2,1,3))
- temp <- mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp <- mux5(wz, temp, M = M, matrix.arg= TRUE)
temp <- m2adefault(temp, M=r*pp) # Note M != M here!
- G = solve(rowSums(temp, dims=2))
+ G = solve(rowSums(temp, dims = 2))
dc.da <- array(NA, c(pp,r,M,r)) # different from other functions
cbindex <- (1:M)[-Index.corner]
- resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow= TRUE), M=M,
- upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ resid2 <- mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
+ upper = FALSE, as.matrix = TRUE) # mat= TRUE,
for(s in 1:r)
for(tt in cbindex) {
- fred <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
- t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
+ fred <- (if (intercept) t(xmat[, -1, drop = FALSE]) else
+ t(xmat)) * matrix(resid2[, tt], pp, nn, byrow = TRUE)
temp2 <- kronecker(ei(s,r), rowSums(fred))
temp4 <- rep(0,pp)
for(k in 1:r) {
- Wiak <- mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
- M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- wxx <- Wiak[,tt] * (if (intercept) xmat[,-1,drop = FALSE] else
+ Wiak <- mux22(t(wz),
+ matrix(Aimat[, k], nn, M, byrow = TRUE),
+ M = M, upper = FALSE, as.matrix = TRUE)
+ wxx <- Wiak[,tt] * (if (intercept)
+ xmat[, -1, drop = FALSE] else
xmat)
- blocki <- (if (intercept) t(xmat[,-1,drop = FALSE]) else
+ blocki <- (if (intercept)
+ t(xmat[, -1, drop = FALSE]) else
t(xmat)) %*% wxx
- temp4 <- temp4 + blocki %*% Cimat[,k]
+ temp4 <- temp4 + blocki %*% Cimat[, k]
}
dc.da[,,tt,s] <- G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
}
ans1 <- dc.da[,,cbindex,,drop = FALSE] # pp x r x (M-r) x r
ans1 <- aperm(ans1, c(2,1,3,4)) # r x pp x (M-r) x r
- ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow= TRUE)
+ ans1 <- matrix(c(ans1), (M-r)*r, r*pp, byrow = TRUE)
detastar.da <- array(0,c(M,r,r,nn))
@@ -1863,19 +1890,19 @@ dcda.fast = function(theta, wz, U, z, M, r, xmat, pp, Index.corner,
}
etastar <- (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
- eta <- matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
+ eta <- matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
- sumWinv <- solve((m2adefault(t(colSums(wz)), M=M))[,,1])
+ sumWinv <- solve((m2adefault(t(colSums(wz)), M = M))[,,1])
deta0.da <- array(0,c(M,M,r))
- AtWi <- kronecker(matrix(1,nn,1), Aimat)
- AtWi <- mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi <- kronecker(matrix(1, nn, 1), Aimat)
+ AtWi <- mux111(t(wz), AtWi, M = M, upper= FALSE) # matrix.arg= TRUE,
AtWi <- array(t(AtWi), c(r,M,nn))
for(ss in 1:r) {
- temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M=M))[,,1] #MxM
+ temp90 <- (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1] #MxM
temp92 <- array(detastar.da[,,ss,], c(M,r,nn))
temp93 <- mux7(temp92, AtWi)
- temp91 = rowSums(temp93, dims=2) # M x M
+ temp91 = rowSums(temp93, dims = 2) # M x M
deta0.da[,,ss] <- -(temp90 + temp91) %*% sumWinv
}
ans2 <- deta0.da[-(1:r),,,drop = FALSE] # (M-r) x M x r
@@ -1907,8 +1934,8 @@ rrr.deriv.rss = function(theta, wz, U, z, M, r, xmat,
Blist[[ii]] = Amat
}
- vlm.wfit(xmat=xmat, z, Blist, U=U, matrix.out = FALSE,
- rss = TRUE, xij=xij)$rss
+ vlm.wfit(xmat=xmat, z, Blist, U = U, matrix.out = FALSE,
+ rss = TRUE, xij = xij)$rss
}
@@ -1938,7 +1965,7 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
Blist[[i]] = Aimat
}
- coeffs = vlm.wfit(xmat, z, Blist, U=U, matrix.out= TRUE,
+ coeffs = vlm.wfit(xmat, z, Blist, U = U, matrix.out= TRUE,
xij = NULL)$mat.coef
c3 = coeffs = t(coeffs) # transpose to make M x (pp+1)
@@ -1952,31 +1979,34 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
fred = kronecker(matrix(1,1,r), if (intercept) xmat[,-1,drop = FALSE] else xmat)
fred = kronecker(fred, matrix(1,M,1))
barney = kronecker(Aimat, matrix(1,1,pp))
- barney = kronecker(matrix(1,nn,1), barney)
+ barney = kronecker(matrix(1, nn, 1), barney)
temp = array(t(barney*fred), c(r*pp,M,nn))
temp = aperm(temp, c(2,1,3))
- temp = mux5(wz, temp, M=M, matrix.arg= TRUE)
+ temp = mux5(wz, temp, M = M, matrix.arg= TRUE)
temp = m2adefault(temp, M=r*pp) # Note M != M here!
- G = solve(rowSums(temp, dims=2))
+ G = solve(rowSums(temp, dims = 2))
dc.da = array(NA,c(pp,r,r,M))
cbindex = (1:M)[-Index.corner]
- resid2 = mux22(t(wz), z - matrix(int.vec,nn,M,byrow= TRUE), M=M,
- upper= FALSE, as.mat= TRUE) # mat= TRUE,
+ resid2 = mux22(t(wz), z - matrix(int.vec, nn, M, byrow = TRUE), M = M,
+ upper = FALSE, as.matrix = TRUE)
for(s in 1:r)
for(tt in cbindex) {
fred = (if (intercept) t(xmat[,-1,drop = FALSE]) else
- t(xmat)) * matrix(resid2[,tt],pp,nn,byrow= TRUE)
+ t(xmat)) * matrix(resid2[,tt],pp,nn,byrow = TRUE)
temp2 = kronecker(ei(s,r), rowSums(fred))
temp4 = rep(0,pp)
for(k in 1:r) {
- Wiak = mux22(t(wz), matrix(Aimat[,k],nn,M,byrow= TRUE),
- M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- wxx = Wiak[,tt] * (if (intercept) xmat[,-1,drop = FALSE] else xmat)
- blocki = (if (intercept) t(xmat[,-1,drop = FALSE]) else t(xmat)) %*% wxx
+ Wiak = mux22(t(wz),
+ matrix(Aimat[, k], nn, M, byrow = TRUE),
+ M = M, upper = FALSE, as.matrix = TRUE)
+ wxx = Wiak[,tt] * (if (intercept)
+ xmat[, -1, drop = FALSE] else xmat)
+ blocki = (if (intercept) t(xmat[, -1, drop = FALSE]) else
+ t(xmat)) %*% wxx
temp4 = temp4 + blocki %*% Cimat[,k]
}
dc.da[,,s,tt] = G %*% (temp2 - 2 * kronecker(ei(s,r),temp4))
@@ -1992,33 +2022,33 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
}
etastar = (if (intercept) xmat[,-1,drop = FALSE] else xmat) %*% Cimat
- eta = matrix(int.vec, nn, M, byrow= TRUE) + etastar %*% t(Aimat)
+ eta = matrix(int.vec, nn, M, byrow = TRUE) + etastar %*% t(Aimat)
- sumWinv = solve((m2adefault(t(colSums(wz)), M=M))[,,1])
+ sumWinv = solve((m2adefault(t(colSums(wz)), M = M))[,,1])
deta0.da = array(0,c(M,M,r))
- AtWi = kronecker(matrix(1,nn,1), Aimat)
- AtWi = mux111(t(wz), AtWi, M=M, upper= FALSE) # matrix.arg= TRUE,
+ AtWi = kronecker(matrix(1, nn, 1), Aimat)
+ AtWi = mux111(t(wz), AtWi, M = M, upper= FALSE) # matrix.arg= TRUE,
AtWi = array(t(AtWi), c(r,M,nn))
for(ss in 1:r) {
- temp90 = (m2adefault(t(colSums(etastar[,ss]*wz)), M=M))[,,1] # M x M
+ temp90 = (m2adefault(t(colSums(etastar[,ss]*wz)), M = M))[,,1] # M x M
temp92 = array(detastar.da[,,ss,],c(M,r,nn))
temp93 = mux7(temp92,AtWi)
- temp91 = rowSums(temp93, dims=2) # M x M
+ temp91 = rowSums(temp93, dims = 2) # M x M
deta0.da[,,ss] = -(temp90 + temp91) %*% sumWinv
}
ans = matrix(0,M,r)
- fred = mux22(t(wz), z-eta, M=M, upper= FALSE, as.mat= TRUE) # mat= TRUE,
- fred.array = array(t(fred %*% Aimat),c(r,1,nn))
+ fred = mux22(t(wz), z - eta, M = M, upper = FALSE, as.matrix = TRUE)
+ fred.array = array(t(fred %*% Aimat),c(r,1, nn))
for(s in 1:r) {
a1 = colSums(fred %*% t(deta0.da[,,s]))
a2 = colSums(fred * etastar[,s])
temp92 = array(detastar.da[,,s,],c(M,r,nn))
temp93 = mux7(temp92, fred.array)
- a3 = rowSums(temp93, dims=2)
+ a3 = rowSums(temp93, dims = 2)
ans[,s] = a1 + a2 + a3
}
@@ -2032,7 +2062,7 @@ rrr.deriv.gradient.fast = function(theta, wz, U, z, M, r, xmat,
-vellipse = function(R, ratio = 1, orientation=0, center=c(0,0), N=300) {
+vellipse = function(R, ratio = 1, orientation = 0, center = c(0,0), N=300) {
if (length(center) != 2) stop("center must be of length 2")
theta = 2*pi*(0:N)/N
x1 = R*cos(theta)
@@ -2050,16 +2080,16 @@ biplot.qrrvglm = function(x, ...) {
lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
add= FALSE, plot.it= TRUE, rug= TRUE, y = FALSE,
- type=c("fitted.values", "predictors"),
+ type = c("fitted.values", "predictors"),
xlab=paste("Latent Variable", if (Rank == 1) "" else " 1", sep = ""),
ylab= if (Rank == 1) switch(type, predictors = "Predictors",
fitted.values = "Fitted values") else "Latent Variable 2",
pcex=par()$cex, pcol=par()$col, pch=par()$pch,
llty=par()$lty, lcol=par()$col, llwd=par()$lwd,
label.arg= FALSE, adj.arg=-0.1,
- ellipse=0.95, Absolute= FALSE,
- elty=par()$lty, ecol=par()$col, elwd=par()$lwd, egrid=200,
- chull.arg= FALSE, clty=2, ccol=par()$col, clwd=par()$lwd,
+ ellipse = 0.95, Absolute= FALSE,
+ elty=par()$lty, ecol=par()$col, elwd=par()$lwd, egrid = 200,
+ chull.arg= FALSE, clty = 2, ccol=par()$col, clwd=par()$lwd,
cpch = " ",
C = FALSE,
OriginC = c("origin","mean"),
@@ -2073,7 +2103,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
type <- as.character(substitute(type))
type <- match.arg(type, c("fitted.values", "predictors"))[1]
- if (is.numeric(OriginC)) OriginC = rep(OriginC, len = 2) else {
+ if (is.numeric(OriginC)) OriginC = rep(OriginC, length.out = 2) else {
if (mode(OriginC) != "character" && mode(OriginC) != "name")
OriginC <- as.character(substitute(OriginC))
OriginC <- match.arg(OriginC, c("origin","mean"))[1]
@@ -2182,15 +2212,15 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
for(i in 1:ncol(r.curves)) {
cutpoint = object at family@linkfun( if (Absolute) ellipse.temp
else Coef.list at Maximum[i] * ellipse.temp,
- extra=object at extra)
+ extra = object at extra)
if (MSratio > 1)
cutpoint = cutpoint[1,1]
cutpoint = object at family@linkfun(Coef.list at Maximum[i],
- extra=object at extra) - cutpoint
+ extra = object at extra) - cutpoint
if (is.finite(cutpoint) && cutpoint > 0) {
Mmat = diag(rep(ifelse(object at control$Crow1positive, 1, -1),
- len = Rank))
+ length.out = Rank))
etoli = eigen(t(Mmat) %*% Coef.list at Tolerance[,,i] %*% Mmat)
A=ifelse(etoli$val[1]>0,sqrt(2*cutpoint*etoli$val[1]),Inf)
B=ifelse(etoli$val[2]>0,sqrt(2*cutpoint*etoli$val[2]),Inf)
@@ -2201,8 +2231,10 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
if (object at control$Crow1positive[1])
theta.angle = pi - theta.angle
if (all(is.finite(c(A,B))))
- lines(vellipse(R=2*A, ratio=B/A, orient=theta.angle,
- center=Coef.list at Optimum[,i], N=egrid),
+ lines(vellipse(R = 2*A, ratio = B/A,
+ orientation = theta.angle,
+ center = Coef.list at Optimum[,i],
+ N = egrid),
lwd=elwd[i], col=ecol[i], lty=elty[i])
}
}
@@ -2227,7 +2259,7 @@ lvplot.qrrvglm = function(object, varlvI = FALSE, reference = NULL,
}
}
if (sites) {
- text(nustar[,1], nustar[,2], adj=0.5,
+ text(nustar[,1], nustar[,2], adj = 0.5,
labels = if (is.null(spch)) dimnames(nustar)[[1]] else
rep(spch, length = nrow(nustar)), col=scol, cex=scex, font=sfont)
}
@@ -2241,7 +2273,7 @@ lvplot.rrvglm = function(object,
A = TRUE,
C = TRUE,
scores = FALSE, plot.it= TRUE,
- groups=rep(1,n),
+ groups=rep(1, n),
gapC=sqrt(sum(par()$cxy^2)), scaleA = 1,
xlab = "Latent Variable 1",
ylab = "Latent Variable 2",
@@ -2299,12 +2331,12 @@ lvplot.rrvglm = function(object,
xlab=xlab, ylab=ylab, ...) # xlim etc. supplied through ...
if (A) {
- Aadj = rep(Aadj, len = length(index.nosz))
- Acex = rep(Acex, len = length(index.nosz))
- Acol = rep(Acol, len = length(index.nosz))
+ Aadj = rep(Aadj, length.out = length(index.nosz))
+ Acex = rep(Acex, length.out = length(index.nosz))
+ Acol = rep(Acol, length.out = length(index.nosz))
if (length(Alabels) != M) stop("'Alabels' must be of length ", M)
if (length(Apch)) {
- Apch = rep(Apch, len = length(index.nosz))
+ Apch = rep(Apch, length.out = length(index.nosz))
for(i in index.nosz)
points(Amat[i,1],Amat[i,2],pch=Apch[i],cex=Acex[i],col=Acol[i])
} else {
@@ -2316,12 +2348,12 @@ lvplot.rrvglm = function(object,
if (C) {
p2 = nrow(Cmat)
- gapC = rep(gapC, len = p2)
- Cadj = rep(Cadj, len = p2)
- Ccex = rep(Ccex, len = p2)
- Ccol = rep(Ccol, len = p2)
- Clwd = rep(Clwd, len = p2)
- Clty = rep(Clty, len = p2)
+ gapC = rep(gapC, length.out = p2)
+ Cadj = rep(Cadj, length.out = p2)
+ Ccex = rep(Ccex, length.out = p2)
+ Ccol = rep(Ccol, length.out = p2)
+ Clwd = rep(Clwd, length.out = p2)
+ Clty = rep(Clty, length.out = p2)
if (length(Clabels) != p2)
stop("'length(Clabels)' must be equal to ", p2)
for(ii in 1:p2) {
@@ -2336,13 +2368,13 @@ lvplot.rrvglm = function(object,
if (scores) {
ugrp = unique(groups)
nlev = length(ugrp) # number of groups
- clty = rep(clty, len = nlev)
- clwd = rep(clwd, len = nlev)
- ccol = rep(ccol, len = nlev)
+ clty = rep(clty, length.out = nlev)
+ clwd = rep(clwd, length.out = nlev)
+ ccol = rep(ccol, length.out = nlev)
if (length(spch))
- spch = rep(spch, len = n)
- scol = rep(scol, len = n)
- scex = rep(scex, len = n)
+ spch = rep(spch, length.out = n)
+ scol = rep(scol, length.out = n)
+ scex = rep(scex, length.out = n)
for(ii in ugrp) {
gp = groups == ii
if (nlev > 1 && (length(unique(spch[gp])) != 1 ||
@@ -2384,10 +2416,12 @@ lvplot.rrvglm = function(object,
p1 = length(colx1.index) # May be 0
Amat <- object at constraints[[colx2.index[1]]]
- B1mat = if (p1) coefvlm(object, mat = TRUE)[colx1.index,,drop = FALSE] else NULL
+ B1mat = if (p1)
+ coefvlm(object, matrix.out = TRUE)[colx1.index,,drop = FALSE] else
+ NULL
- C.try <- coefvlm(object, mat = TRUE)[colx2.index, , drop = FALSE]
+ C.try <- coefvlm(object, matrix.out = TRUE)[colx2.index, , drop = FALSE]
Cmat <- C.try %*% Amat %*% solve(t(Amat) %*% Amat)
@@ -2424,7 +2458,7 @@ setMethod("Coef", "rrvglm", function(object, ...) Coef.rrvglm(object, ...))
-printCoef.rrvglm = function(x, ...) {
+show.Coef.rrvglm = function(x, ...) {
object = x
@@ -2491,7 +2525,7 @@ summary.qrrvglm = function(object,
-printsummary.qrrvglm = function(x, ...) {
+show.summary.qrrvglm = function(x, ...) {
@@ -2524,19 +2558,20 @@ setMethod("summary", "qrrvglm",
function(object, ...)
summary.qrrvglm(object, ...))
- setMethod("print", "summary.qrrvglm",
- function(x, ...)
- invisible(printsummary.qrrvglm(x, ...)))
+
+
+
setMethod("show", "summary.qrrvglm",
function(object)
- invisible(printsummary.qrrvglm(object)))
+ show.summary.qrrvglm(object))
+
+
+
-setMethod("print", "Coef.rrvglm", function(x, ...)
- invisible(printCoef.rrvglm(x, ...)))
setMethod("show", "Coef.rrvglm", function(object)
- invisible(printCoef.rrvglm(object)))
+ show.Coef.rrvglm(object))
@@ -2610,7 +2645,7 @@ setMethod("show", "Coef.rrvglm", function(object)
warn.save = options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
answer = if (is(object.save, "rrvglm")) object.save else
- rrvglm(as.formula(str2), fam = poissonff,
+ rrvglm(as.formula(str2), family = poissonff,
constraints = cms, control = myrrcontrol,
data = .grc.df)
options(warn = warn.save)
@@ -2646,9 +2681,9 @@ trplot.qrrvglm = function(object,
cex=par()$cex,
col = 1:(nos*(nos-1)/2),
log = "",
- lty = rep(par()$lty, len = nos*(nos-1)/2),
- lwd = rep(par()$lwd, len = nos*(nos-1)/2),
- tcol= rep(par()$col, len = nos*(nos-1)/2),
+ lty = rep(par()$lty, length.out = nos*(nos-1)/2),
+ lwd = rep(par()$lwd, length.out = nos*(nos-1)/2),
+ tcol= rep(par()$col, length.out = nos*(nos-1)/2),
xlab = NULL, ylab = NULL,
main = "", # "Trajectory plot",
type = "b", check.ok = TRUE, ...) {
@@ -2660,7 +2695,7 @@ trplot.qrrvglm = function(object,
M = object at misc$M #
nn = nrow(fv) # Number of sites
if (length(sitenames))
- sitenames = rep(sitenames, len = nn)
+ sitenames = rep(sitenames, length.out = nn)
sppNames = dimnames(object at y)[[2]]
if (!length(whichSpecies)) {
whichSpecies = sppNames[1:NOS]
@@ -2680,8 +2715,8 @@ trplot.qrrvglm = function(object,
if (!(length(cx1i) == 1 && names(cx1i) == "(Intercept)"))
stop("trajectory plots allowable only for Norrr = ~ 1 models")
- first.spp = iam(1,1,M=M,both = TRUE,diag = FALSE)$row.index
- second.spp = iam(1,1,M=M,both = TRUE,diag = FALSE)$col.index
+ first.spp = iam(1,1,M = M,both = TRUE,diag = FALSE)$row.index
+ second.spp = iam(1,1,M = M,both = TRUE,diag = FALSE)$col.index
myxlab = if (length(whichSpecies.numer) == 2) {
paste("Fitted value for",
if (is.character(whichSpecies.numer)) whichSpecies.numer[1] else
@@ -2703,10 +2738,10 @@ trplot.qrrvglm = function(object,
ylab=myylab, main=main, ...)
}
- lwd = rep(lwd, len = nos*(nos-1)/2)
- col = rep(col, len = nos*(nos-1)/2)
- lty = rep(lty, len = nos*(nos-1)/2)
- tcol = rep(tcol, len = nos*(nos-1)/2)
+ lwd = rep(lwd, length.out = nos*(nos-1)/2)
+ col = rep(col, length.out = nos*(nos-1)/2)
+ lty = rep(lty, length.out = nos*(nos-1)/2)
+ tcol = rep(tcol, length.out = nos*(nos-1)/2)
oo = order(coef.obj at lv) # Sort by the latent variable
ii = 0
@@ -2751,7 +2786,7 @@ vcovqrrvglm = function(object,
ITolerances = object at control$EqualTolerances,
MaxScale = c("predictors", "response"),
dispersion = rep(if (length(sobj at dispersion)) sobj at dispersion else 1,
- len = M), ...) {
+ length.out = M), ...) {
stop("this function is not yet completed")
if (mode(MaxScale) != "character" && mode(MaxScale) != "name")
@@ -2763,7 +2798,7 @@ vcovqrrvglm = function(object,
sobj = summary(object)
cobj = Coef(object, ITolerances = ITolerances, ...)
M = nrow(cobj at A)
- dispersion = rep(dispersion, len = M)
+ dispersion = rep(dispersion, length.out = M)
if (cobj at Rank != 1)
stop("object must be a rank 1 model")
@@ -2774,7 +2809,7 @@ vcovqrrvglm = function(object,
if ((length(object at control$colx1.index) != 1) ||
(names(object at control$colx1.index) != "(Intercept)"))
stop("Can only handle Norrr=~1 models")
- okvals=c(3*M,2*M+1) # Tries to correspond to EqualTol == c(FALSE,TRUE) resp.
+ okvals = c(3*M,2*M+1) # Tries to correspond to EqualTol == c(FALSE,TRUE) resp.
if (all(length(coef(object)) != okvals))
stop("Can only handle intercepts-only model with EqualTolerances = FALSE")
@@ -2801,7 +2836,7 @@ vcovqrrvglm = function(object,
if (nchar(link.function))
paste(link.function, "(Maximum)", sep = "") else
"Maximum"))
- NAthere = is.na(answer %*% rep(1, len = 3))
+ NAthere = is.na(answer %*% rep(1, length.out = 3))
answer[NAthere,] = NA # NA in tolerance means NA everywhere else
new(Class = "vcov.qrrvglm",
Cov.unscaled=Cov.unscaled,
@@ -2823,13 +2858,13 @@ setClass(Class = "vcov.qrrvglm", representation(
-model.matrix.qrrvglm <- function(object, type=c("lv", "vlm"), ...) {
+model.matrix.qrrvglm <- function(object, type = c("lv", "vlm"), ...) {
if (mode(type) != "character" && mode(type) != "name")
type = as.character(substitute(type))
type = match.arg(type, c("lv","vlm"))[1]
- switch(type, lv=Coef(object, ...)@lv, vlm=object at x)
+ switch(type, lv=Coef(object, ...)@lv, vlm = object at x)
}
setMethod("model.matrix", "qrrvglm", function(object, ...)
@@ -2914,7 +2949,7 @@ perspqrrvglm = function(x, varlvI = FALSE, reference = NULL,
if (plot.it) {
if (!length(oylim))
ylim = c(0, max(fitvals[,whichSpecies.numer])*stretch) # A revision
- col = rep(col, len = length(whichSpecies.numer))
+ col = rep(col, length.out = length(whichSpecies.numer))
llty = rep(llty, leng=length(whichSpecies.numer))
llwd = rep(llwd, leng=length(whichSpecies.numer))
if (!add1)
@@ -3074,7 +3109,7 @@ is.bell.vlm <-
is.bell.rrvglm <- function(object, ...) {
M = object at misc$M
ynames = object at misc$ynames
- ans = rep(FALSE, len = M)
+ ans = rep(FALSE, length.out = M)
if (length(ynames)) names(ans) = ynames
ans
}
diff --git a/R/family.survival.R b/R/family.survival.R
index 3783f28..391951b 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -17,10 +17,12 @@
esd = list(),
imu = NULL, isd = NULL, zero = 2)
{
- if (!is.Numeric(r1, allow = 1, integ = TRUE) || r1 < 0)
- stop("bad input for r1")
- if (!is.Numeric(r2, allow = 1, integ = TRUE) || r2 < 0)
- stop("bad input for r2")
+ if (!is.Numeric(r1, allowable.length = 1, integer.valued = TRUE) ||
+ r1 < 0)
+ stop("bad input for 'r1'")
+ if (!is.Numeric(r2, allowable.length = 1, integer.valued = TRUE) ||
+ r2 < 0)
+ stop("bad input for 'r2'")
if (mode(lmu) != "character" && mode(lmu) != "name")
lmu = as.character(substitute(lmu))
if (mode(lsd) != "character" && mode(lsd) != "name")
@@ -48,7 +50,8 @@
if (ncol(y <- cbind(y)) != 1)
stop("the response must be a vector or a one-column matrix")
- if (length(w) != n || !is.Numeric(w, integ = TRUE, posit = TRUE))
+ if (length(w) != n ||
+ !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
stop("the argument 'weights' must be a vector ",
"of positive integers")
@@ -70,7 +73,7 @@
.emu = emu, .esd = esd,
.imu = imu, .isd = isd,
.r1 = r1, .r2 = r2 ))),
- linkinv = function(eta, extra = NULL) eta[,1],
+ linkinv = function(eta, extra = NULL) eta[, 1],
last = eval(substitute(expression({
misc$link = c(mu = .lmu , sd = .lsd )
misc$earg = list(mu = .emu , sd = .esd )
@@ -144,6 +147,7 @@
+
dbisa = function(x, shape, scale = 1, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
@@ -162,21 +166,28 @@ dbisa = function(x, shape, scale = 1, log = FALSE) {
if (log.arg) logdensity else exp(logdensity)
}
+
pbisa = function(q, shape, scale=1) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(shape, pos = TRUE)) stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, pos = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
ans = pnorm(((temp <- sqrt(q/scale)) - 1/temp) / shape)
ans[scale < 0 | shape < 0] = NA
ans[q <= 0] = 0
ans
}
+
qbisa = function(p, shape, scale=1) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("argument 'p' must have values inside the interval (0,1)")
- if (!is.Numeric(shape, pos = TRUE)) stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, pos = TRUE)) stop("bad input for argument 'scale'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
A = qnorm(p)
temp1 = A * shape * sqrt(4 + A^2 * shape^2)
ans1 = (2 + A^2 * shape^2 + temp1) * scale / 2
@@ -184,9 +195,11 @@ qbisa = function(p, shape, scale=1) {
ifelse(p < 0.5, pmin(ans1, ans2), pmax(ans1, ans2))
}
+
rbisa = function(n, shape, scale=1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
A = rnorm(use.n)
@@ -219,11 +232,12 @@ rbisa = function(n, shape, scale=1) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(ishape) && !is.Numeric(ishape, posit = TRUE))
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(iscale, posit = TRUE))
+ if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -240,7 +254,7 @@ rbisa = function(n, shape, scale=1) {
}) , list( .zero = zero))),
initialize = eval(substitute(expression({
if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or a one-column matrix")
+ stop("the response must be a vector or a one-column matrix")
predictors.names =
c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
namesof("scale", .lscale, tag = FALSE))
@@ -266,21 +280,21 @@ rbisa = function(n, shape, scale=1) {
.eshape = eshape, .escale = escale,
.imethod=imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- sh = eta2theta(eta[,1], .lshape, earg = .eshape)
- sc = eta2theta(eta[,2], .lscale, earg = .escale)
+ sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[, 2], .lscale, earg = .escale)
sc * (1 + sh^2 / 2)
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(shape= .lshape, scale= .lscale)
- misc$earg = list(shape= .eshape, scale= .escale)
+ misc$link = c(shape = .lshape, scale = .lscale)
+ misc$earg = list(shape = .eshape, scale = .escale)
misc$expected = TRUE
}) , list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- sh = eta2theta(eta[,1], .lshape, earg = .eshape)
- sc = eta2theta(eta[,2], .lscale, earg = .escale)
+ sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dbisa(x=y, shape=sh, scale=sc, log = TRUE))
@@ -289,8 +303,8 @@ rbisa = function(n, shape, scale=1) {
.eshape = eshape, .escale = escale ))),
vfamily=c("bisa"),
deriv = eval(substitute(expression({
- sh = eta2theta(eta[,1], .lshape, earg = .eshape)
- sc = eta2theta(eta[,2], .lscale, earg = .escale)
+ sh = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ sc = eta2theta(eta[, 2], .lscale, earg = .escale)
dl.dsh = ((y/sc - 2 + sc/y) / sh^2 - 1) / sh
dl.dsc = -0.5 / sc + 1/(y+sc) + sqrt(y) * ((y+sc)/y) *
(sqrt(y/sc) - sqrt(sc/y)) / (2 * sh^2 * sc^1.5)
diff --git a/R/family.ts.R b/R/family.ts.R
index a763903..e06c4a5 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -1,14 +1,18 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
+
rrar.Ci <- function(i, coeffs, aa, Ranks., MM) {
index <- cumsum(c(aa, MM*Ranks.))
- ans<-matrix(coeffs[(index[i]+1):index[i+1]], Ranks.[i], MM, byrow=TRUE)
+ ans <- matrix(coeffs[(index[i]+1):index[i+1]],
+ Ranks.[i], MM, byrow = TRUE)
t(ans)
}
rrar.Ak1 <- function(MM, coeffs, Ranks., aa) {
@@ -25,10 +29,15 @@
if (aa > 0 && ptr != aa) stop("something wrong")
Ak1
}
+
+
rrar.Di <- function(i, Ranks.) {
if (Ranks.[1] == Ranks.[i]) diag(Ranks.[i]) else
- rbind(diag(Ranks.[i]), matrix(0, Ranks.[1]-Ranks.[i], Ranks.[i]))
+ rbind(diag(Ranks.[i]),
+ matrix(0, Ranks.[1] - Ranks.[i], Ranks.[i]))
}
+
+
rrar.Mi <- function(i, MM, Ranks., ki) {
if (Ranks.[ki[i]] == MM)
return(NULL)
@@ -43,13 +52,16 @@
}
kronecker(Mi, Ji)
}
+
rrar.Mmat <- function(MM, uu, Ranks., ki) {
Mmat <- NULL
- for(i in uu:1) {
- Mmat <- rbind(Mmat, rrar.Mi(i, MM, Ranks., ki))
+ for(ii in uu:1) {
+ Mmat <- rbind(Mmat, rrar.Mi(ii, MM, Ranks., ki))
}
Mmat
}
+
+
block.diag <- function(A, B) {
if (is.null(A) && is.null(B))
return(NULL)
@@ -63,6 +75,8 @@
temp <- cbind(A, matrix(0, nrow(A), ncol(B)))
rbind(temp, cbind(matrix(0, nrow(B), ncol(A)), B))
}
+
+
rrar.Ht <- function(plag, MM, Ranks., coeffs, aa, uu, ki) {
Htop <- Hbot <- NULL
Mmat <- rrar.Mmat(MM, uu, Ranks., ki) # NULL if full rank
@@ -83,6 +97,8 @@
}
rbind(Htop, Hbot)
}
+
+
rrar.Ut <- function(y, tt, plag, MM) {
Ut <- NULL
if (plag>1)
@@ -91,6 +107,8 @@
}
Ut
}
+
+
rrar.UU <- function(y, plag, MM, n) {
UU <- NULL
for(i in (plag+1):n) {
@@ -98,6 +116,8 @@
}
UU
}
+
+
rrar.Wmat <- function(y, Ranks., MM, ki, plag, aa, uu, n, coeffs) {
temp1 <- rrar.UU(y, plag, MM, n)
temp2 <- t(rrar.Ht(plag, MM, Ranks., coeffs, aa, uu, ki))
@@ -106,7 +126,7 @@
-rrar.control <- function(stepsize=0.5, save.weight=TRUE, ...)
+rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...)
{
if (stepsize <= 0 || stepsize > 1) {
@@ -117,7 +137,7 @@ rrar.control <- function(stepsize=0.5, save.weight=TRUE, ...)
}
-rrar <- function(Ranks=1, coefstart=NULL)
+ rrar <- function(Ranks = 1, coefstart = NULL)
{
lag.p <- length(Ranks)
@@ -126,7 +146,7 @@ rrar <- function(Ranks=1, coefstart=NULL)
")\n\n",
"Link: ",
namesof("mu_t", "identity"),
- ", t = ", paste(paste(1:lag.p, coll=",", sep="")) ,
+ ", t = ", paste(paste(1:lag.p, coll = ",", sep = "")) ,
""),
initialize = eval(substitute(expression({
Ranks. <- .Ranks
@@ -140,7 +160,7 @@ rrar <- function(Ranks=1, coefstart=NULL)
dsrank <- -sort(-Ranks.) # ==rev(sort(Ranks.))
if (any(dsrank != Ranks.))
stop("Ranks must be a non-increasing sequence")
- if (!is.matrix(y) || ncol(y) ==1) {
+ if (!is.matrix(y) || ncol(y) == 1) {
stop("response must be a matrix with more than one column")
} else {
MM <- ncol(y)
@@ -161,16 +181,17 @@ rrar <- function(Ranks=1, coefstart=NULL)
if (any(w != 1))
stop("all weights should be 1")
- new.coeffs <- .coefstart # Needed for iter=1 of $weight
+ new.coeffs <- .coefstart # Needed for iter = 1 of $weight
new.coeffs <- if (length(new.coeffs))
rep(new.coeffs, len = aa+sum(Ranks.)*MM) else
runif(aa+sum(Ranks.)*MM)
- temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
+ temp8 <- rrar.Wmat(y.save, Ranks., MM, ki, plag,
+ aa, uu, nn, new.coeffs)
X_vlm_save <- temp8$UU %*% temp8$Ht
if (!length(etastart)) {
etastart <- X_vlm_save %*% new.coeffs
- etastart <- matrix(etastart, ncol=ncol(y), byrow=TRUE) # So M=ncol(y)
+ etastart <- matrix(etastart, ncol = ncol(y), byrow = TRUE)
}
extra$Ranks. <- Ranks.; extra$aa <- aa
@@ -179,14 +200,15 @@ rrar <- function(Ranks=1, coefstart=NULL)
extra$y.save <- y.save
keep.assign <- attr(x, "assign")
- x <- x[-indices,,drop=FALSE]
+ x <- x[-indices, , drop = FALSE]
if (is.R())
attr(x, "assign") <- keep.assign
- y <- y[-indices,,drop=FALSE]
+ y <- y[-indices, , drop = FALSE]
w <- w[-indices]
n.save <- n <- nn - plag
- }), list( .Ranks=Ranks, .coefstart=coefstart ))),
- linkinv=function(eta, extra=NULL) {
+ }), list( .Ranks = Ranks, .coefstart = coefstart ))),
+
+ linkinv = function(eta, extra = NULL) {
aa <- extra$aa
coeffs <- extra$coeffs
MM <- extra$MM
@@ -201,26 +223,28 @@ rrar <- function(Ranks=1, coefstart=NULL)
for(i in 1:plag) {
Di <- rrar.Di(i, Ranks.)
Ci <- rrar.Ci(i, coeffs, aa, Ranks., MM)
- mu <- mu + y.save[tt-i,,drop=FALSE] %*% t(Ak1 %*% Di %*% t(Ci))
+ mu <- mu + y.save[tt-i, , drop = FALSE] %*%
+ t(Ak1 %*% Di %*% t(Ci))
}
mu
},
- last=expression({
- misc$plag <- plag
- misc$Ranks <- Ranks.
- misc$Ak1 <- Ak1
- misc$omegahat <- omegahat
- misc$Cmatrices <- Cmatrices
- misc$Dmatrices <- Dmatrices
- misc$Hmatrix <- temp8$Ht
- misc$Phimatrices <- vector("list", plag)
- for(i in 1:plag) {
- misc$Phimatrices[[i]] = Ak1 %*% Dmatrices[[i]] %*% t(Cmatrices[[i]])
- }
- misc$Z <- y.save %*% t(solve(Ak1))
+ last = expression({
+ misc$plag <- plag
+ misc$Ranks <- Ranks.
+ misc$Ak1 <- Ak1
+ misc$omegahat <- omegahat
+ misc$Cmatrices <- Cmatrices
+ misc$Dmatrices <- Dmatrices
+ misc$Hmatrix <- temp8$Ht
+ misc$Phimatrices <- vector("list", plag)
+ for(ii in 1:plag) {
+ misc$Phimatrices[[ii]] = Ak1 %*% Dmatrices[[ii]] %*%
+ t(Cmatrices[[ii]])
+ }
+ misc$Z <- y.save %*% t(solve(Ak1))
}),
- vfamily="rrar",
- deriv=expression({
+ vfamily = "rrar",
+ deriv = expression({
temp8 <- rrar.Wmat(y.save,Ranks.,MM,ki,plag,aa,uu,nn,new.coeffs)
X_vlm_save <- temp8$UU %*% temp8$Ht
@@ -230,23 +254,23 @@ rrar <- function(Ranks=1, coefstart=NULL)
tt <- (1+plag):nn
Ak1 <- rrar.Ak1(MM, new.coeffs, Ranks., aa)
Cmatrices <- Dmatrices <- vector("list", plag)
- for(i in 1:plag) {
- Dmatrices[[i]] <- Di <- rrar.Di(i, Ranks.)
- Cmatrices[[i]] <- Ci <- rrar.Ci(i, new.coeffs, aa, Ranks., MM)
- resmat <- resmat - y.save[tt-i,,drop=FALSE] %*% t(Ak1 %*% Di %*% t(Ci))
- NULL
+ for(ii in 1:plag) {
+ Dmatrices[[ii]] <- Di <- rrar.Di(ii, Ranks.)
+ Cmatrices[[ii]] <- Ci <- rrar.Ci(ii, new.coeffs, aa, Ranks., MM)
+ resmat <- resmat - y.save[tt - ii, , drop = FALSE] %*%
+ t(Ak1 %*% Di %*% t(Ci))
}
omegahat <- (t(resmat) %*% resmat) / n # MM x MM
omegainv <- solve(omegahat)
omegainv <- solve(omegahat)
- ind1 <- iam(NA,NA,MM,both=TRUE)
+ ind1 <- iam(NA,NA,MM,both = TRUE)
- wz = matrix(omegainv[cbind(ind1$row,ind1$col)],
- nn-plag, length(ind1$row), byrow=TRUE)
- mux22(t(wz), y-mu, M=extra$MM, as.mat=TRUE)
+ wz = matrix(omegainv[cbind(ind1$row, ind1$col)],
+ nn-plag, length(ind1$row), byrow = TRUE)
+ mux22(t(wz), y-mu, M = extra$MM, as.matrix = TRUE)
}),
- weight=expression({
+ weight = expression({
wz
}))
}
@@ -256,143 +280,158 @@ rrar <- function(Ranks=1, coefstart=NULL)
-vglm.garma.control <- function(save.weight=TRUE, ...)
+
+
+
+vglm.garma.control <- function(save.weight = TRUE, ...)
{
list(save.weight = as.logical(save.weight)[1])
}
-garma <- function(link="identity",
- earg =list(),
- p.ar.lag=1, q.lag.ma=0,
- coefstart=NULL,
- step=1.0)
+ garma <- function(link = "identity",
+ earg = list(),
+ p.ar.lag = 1,
+ q.ma.lag = 0,
+ coefstart = NULL,
+ step = 1.0)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.Numeric(p.ar.lag, integer=TRUE))
- stop("bad input for argument 'p.ar.lag'")
- if (!is.Numeric(q.lag.ma, integer=TRUE))
- stop("bad input for argument 'q.lag.ma'")
- if (q.lag.ma != 0)
- stop("sorry, only q.lag.ma=0 is currently implemented")
- if (!is.list(earg)) earg = list()
-
- new("vglmff",
- blurb = c("GARMA(", p.ar.lag, ",", q.lag.ma, ")\n\n",
- "Link: ",
- namesof("mu_t", link, earg = earg),
- ", t = ", paste(paste(1:p.ar.lag, coll=",", sep=""))),
- initialize = eval(substitute(expression({
- plag <- .p.ar.lag
- predictors.names = namesof("mu", .link, earg = .earg, tag=FALSE)
- indices <- 1:plag
- tt <- (1+plag):nrow(x)
- pp <- ncol(x)
-
- copy_X_vlm <- TRUE # x matrix changes at each iteration
+ if (mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ if (!is.Numeric(p.ar.lag, integer.valued = TRUE, allowable.length = 1))
+ stop("bad input for argument 'p.ar.lag'")
+ if (!is.Numeric(q.ma.lag, integer.valued = TRUE, allowable.length = 1))
+ stop("bad input for argument 'q.ma.lag'")
+ if (q.ma.lag != 0)
+ stop("sorry, only q.ma.lag = 0 is currently implemented")
+
+ if (!is.list(earg)) earg = list()
+
+
+ new("vglmff",
+ blurb = c("GARMA(", p.ar.lag, ",", q.ma.lag, ")\n\n",
+ "Link: ",
+ namesof("mu_t", link, earg = earg),
+ ", t = ", paste(paste(1:p.ar.lag, coll = ",", sep = ""))),
+ initialize = eval(substitute(expression({
+ plag <- .p.ar.lag
+ predictors.names = namesof("mu", .link, earg = .earg, tag = FALSE)
+ indices <- 1:plag
+ tt <- (1+plag):nrow(x)
+ pp <- ncol(x)
+
+ copy_X_vlm <- TRUE # x matrix changes at each iteration
+
+ if ( .link == "logit" || .link == "probit" || .link == "cloglog" ||
+ .link == "cauchit") {
+ delete.zero.colns <- TRUE
+ eval(process.categorical.data.vgam)
+ mustart <- mustart[tt,2]
+ y <- y[,2]
+ }
- if ( .link == "logit" || .link == "probit" || .link == "cloglog" ||
- .link == "cauchit") {
- delete.zero.colns <- TRUE
- eval(process.categorical.data.vgam)
- mustart <- mustart[tt,2]
- y <- y[,2]
- }
+ x.save <- x # Save the original
+ y.save <- y # Save the original
+ w.save <- w # Save the original
- x.save <- x # Save the original
- y.save <- y # Save the original
- w.save <- w # Save the original
+ new.coeffs <- .coefstart # Needed for iter = 1 of @weight
+ new.coeffs <- if (length(new.coeffs))
+ rep(new.coeffs, len = pp+plag) else
+ c(runif(pp), rep(0, plag))
- new.coeffs <- .coefstart # Needed for iter=1 of @weight
- new.coeffs <- if (length(new.coeffs))
- rep(new.coeffs, len = pp+plag) else
- c(runif(pp), rep(0, plag))
- if (!length(etastart)) {
- etastart <- x[-indices,,drop=FALSE] %*% new.coeffs[1:pp]
- }
- x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
- dx <- dimnames(x.save)
- morenames <- paste("(lag", 1:plag, ")", sep="")
- dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
-
- x <- x[-indices,,drop=FALSE]
- class(x) = if (is.R()) "matrix" else "model.matrix" # Added 27/2/02; 26/2/04
- y <- y[-indices]
- w <- w[-indices]
- n.save <- n <- n - plag
- more <- vector("list", plag)
- names(more) <- morenames
- for(i in 1:plag)
- more[[i]] <- i + max(unlist(attr(x.save, "assign")))
- attr(x, "assign") <- c(attr(x.save, "assign"), more)
- }), list( .link=link, .p.ar.lag=p.ar.lag,
- .coefstart=coefstart, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra=NULL) {
- eta2theta(eta, link= .link, earg = .earg)
- }, list( .link=link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c(mu = .link)
- misc$earg <- list(mu = .earg)
- misc$plag <- plag
- }), list( .link=link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra=NULL) {
- if (residuals) switch( .link,
- identity=y-mu,
- loge=w*(y/mu - 1),
- inverse=w*(y/mu - 1),
- w*(y/mu - (1-y)/(1-mu))) else
- switch( .link,
- identity=sum(w*(y-mu)^2),
- loge=sum(w*(-mu + y*log(mu))),
- inverse=sum(w*(-mu + y*log(mu))),
- sum(w*(y*log(mu) + (1-y)*log1p(-mu))))
- }, list( .link=link, .earg = earg ))),
- middle2 = eval(substitute(expression({
- realfv <- fv
- for(i in 1:plag) {
- realfv <- realfv + old.coeffs[i+pp] *
- (x.save[tt-i,1:pp,drop=FALSE] %*% new.coeffs[1:pp]) # +
- }
+ if (!length(etastart)) {
+ etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:pp]
+ }
- true.eta <- realfv + offset
- mu <- family at linkinv(true.eta, extra) # overwrite mu with correct one
- }), list( .link=link, .earg = earg ))),
- vfamily = c("garma", "vglmgam"),
- deriv = eval(substitute(expression({
- dl.dmu <- switch( .link,
- identity=y-mu,
- loge=(y-mu)/mu,
- inverse=(y-mu)/mu,
- (y-mu) / (mu*(1-mu)))
- dmu.deta <- dtheta.deta(mu, .link, earg = .earg)
- step <- .step # This is another method of adjusting step lengths
- step * w * dl.dmu * dmu.deta
- }), list( .link=link, .step=step, .earg = earg ))),
- weight = eval(substitute(expression({
- x[,1:pp] <- x.save[tt,1:pp] # Reinstate
+ x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
+ dx <- dimnames(x.save)
+ morenames <- paste("(lag", 1:plag, ")", sep = "")
+ dimnames(x) <- list(dx[[1]], c(dx[[2]], morenames))
+
+ x <- x[-indices, , drop = FALSE]
+ class(x) = "matrix"
+ y <- y[-indices]
+ w <- w[-indices]
+ n.save <- n <- n - plag
+ more <- vector("list", plag)
+ names(more) <- morenames
+ for(i in 1:plag)
+ more[[i]] <- i + max(unlist(attr(x.save, "assign")))
+ attr(x, "assign") <- c(attr(x.save, "assign"), more)
+ }), list( .link = link, .p.ar.lag = p.ar.lag,
+ .coefstart = coefstart, .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, link = .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu = .link)
+ misc$earg <- list(mu = .earg)
+ misc$plag <- plag
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals) switch( .link ,
+ identity = y-mu,
+ loge = w*(y/mu - 1),
+ inverse = w*(y/mu - 1),
+ w*(y/mu - (1-y)/(1-mu))) else
+ switch( .link ,
+ identity = sum(w*(y-mu)^2),
+ loge = sum(w*(-mu + y*log(mu))),
+ inverse = sum(w*(-mu + y*log(mu))),
+ sum(w*(y * log(mu) + (1-y) * log1p(-mu))))
+ }, list( .link = link, .earg = earg ))),
+ middle2 = eval(substitute(expression({
+ realfv <- fv
+ for(i in 1:plag) {
+ realfv <- realfv + old.coeffs[i+pp] *
+ (x.save[tt-i, 1:pp,drop = FALSE] %*% new.coeffs[1:pp]) # +
+ }
- for(i in 1:plag) {
- temp = theta2eta(y.save[tt-i], .link, earg = .earg)
- x[,1:pp] <- x[,1:pp] - x.save[tt-i,1:pp] * new.coeffs[i+pp]
- x[,pp+i] <- temp - x.save[tt-i,1:pp,drop=FALSE] %*% new.coeffs[1:pp]
- }
- class(x)=if(is.R()) "matrix" else "model.matrix" # Added 27/2/02; 26/2/04
+ true.eta <- realfv + offset
+ mu <- family at linkinv(true.eta, extra) # overwrite mu with correct one
+ }), list( .link = link, .earg = earg ))),
+ vfamily = c("garma", "vglmgam"),
+ deriv = eval(substitute(expression({
+ dl.dmu <- switch( .link,
+ identity = y-mu,
+ loge = (y - mu) / mu,
+ inverse = (y - mu) / mu,
+ (y - mu) / (mu * (1 - mu)))
+ dmu.deta <- dtheta.deta(mu, .link, earg = .earg)
+ step <- .step # This is another method of adjusting step lengths
+ step * w * dl.dmu * dmu.deta
+ }), list( .link = link,
+ .step = step,
+ .earg = earg ))),
+
+ weight = eval(substitute(expression({
+ x[, 1:pp] <- x.save[tt, 1:pp] # Reinstate
+
+ for(i in 1:plag) {
+ temp = theta2eta(y.save[tt-i], .link, earg = .earg)
+
+
+ x[, 1:pp] <- x[, 1:pp] - x.save[tt-i, 1:pp] * new.coeffs[i+pp]
+ x[, pp+i] <- temp - x.save[tt-i, 1:pp,drop = FALSE] %*%
+ new.coeffs[1:pp]
+ }
+ class(x) = "matrix" # Added 27/2/02; 26/2/04
- if (iter == 1)
- old.coeffs <- new.coeffs
+ if (iter == 1)
+ old.coeffs <- new.coeffs
- X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij=control$xij)
+ X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
- vary = switch( .link,
- identity=1,
- loge=mu,
- inverse=mu^2,
- mu*(1-mu))
- w * dtheta.deta(mu, link= .link, earg = .earg)^2 / vary
- }), list( .link=link,
- .earg = earg ))))
+ vary = switch( .link ,
+ identity = 1,
+ loge = mu,
+ inverse = mu^2,
+ mu * (1 - mu))
+ w * dtheta.deta(mu, link = .link , earg = .earg )^2 / vary
+ }), list( .link = link,
+ .earg = earg ))))
}
@@ -400,8 +439,9 @@ garma <- function(link="identity",
- if (FALSE) {
-setClass(Class="Coef.rrar", representation(
+ if (FALSE)
+{
+setClass(Class = "Coef.rrar", representation(
"plag" = "integer",
"Ranks" = "integer",
"omega" = "integer",
@@ -415,7 +455,7 @@ setClass(Class="Coef.rrar", representation(
Coef.rrar = function(object, ...) {
- result = new(Class="Coef.rrar",
+ result = new(Class = "Coef.rrar",
"plag" = object at misc$plag,
"Ranks" = object at misc$Ranks,
"omega" = object at misc$omega,
@@ -427,8 +467,12 @@ Coef.rrar = function(object, ...) {
"Ak1" = object at misc$Ak1)
}
-print.Coef.rrar = function(x, ...) {
- cat(x at plag)
+
+
+
+
+show.Coef.rrar <- function(object) {
+ cat(object at plag)
}
@@ -436,9 +480,19 @@ setMethod("Coef", "rrar",
function(object, ...)
Coef(object, ...))
-setMethod("print", "Coef.rrar",
- function(x, ...)
- invisible(print.Coef.rrar(x, ...)))
+
+
+
+setMethod("show", "Coef.rrar",
+ function(object)
+ show.Coef.rrar(object))
+
+
}
+
+
+
+
+
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 5871275..400590d 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -51,8 +51,11 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
ltheta = as.character(substitute(ltheta))
if (mode(lnu) != "character" && mode(lnu) != "name")
lnu = as.character(substitute(lnu))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(etheta)) etheta = list()
if (!is.list(enu)) enu = list()
@@ -83,10 +86,10 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
mccullagh89.aux = function(thetaval, y, x, w, extraargs)
mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
theta.grid = seq(-0.9, 0.9, by=0.05)
- try.this = getMaxMin(theta.grid, objfun=mccullagh89.aux,
+ try.this = getMaxMin(theta.grid, objfun = mccullagh89.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- try.this = rep(try.this, len = n)
+ try.this = rep(try.this, length.out = n)
try.this
}
tmp = y / (theta.init-y)
@@ -97,31 +100,31 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
etastart = cbind(theta2eta(theta.init, .ltheta, earg = .etheta ),
theta2eta(nu.init, .lnu, earg = .enu ))
}
- }), list( .ltheta=ltheta, .lnu=lnu, .inu=inu, .itheta=itheta,
+ }), list( .ltheta = ltheta, .lnu=lnu, .inu=inu, .itheta = itheta,
.etheta = etheta, .enu=enu ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[,2], .lnu, earg = .enu )
+ Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
nu*Theta/(1+nu)
- }, list( .ltheta=ltheta, .lnu=lnu,
+ }, list( .ltheta = ltheta, .lnu=lnu,
.etheta = etheta, .enu=enu ))),
last = eval(substitute(expression({
misc$link = c("theta" = .ltheta, "nu" = .lnu)
misc$earg = list("theta" = .etheta, "nu" = .enu )
- }), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ }), list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[,2], .lnu, earg = .enu )
+ Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * ((nu-0.5)*log1p(-y^2) - nu * log1p(-2*Theta*y + Theta^2) -
lbeta(nu+0.5,0.5 )))
- }, list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ }, list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
vfamily = c("mccullagh89"),
deriv = eval(substitute(expression({
- Theta = eta2theta(eta[,1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[,2], .lnu, earg = .enu )
+ Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
dTheta.deta = dtheta.deta(Theta, .ltheta, earg = .etheta )
dnu.deta = dtheta.deta(nu, .lnu, earg = .enu )
dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
@@ -129,7 +132,7 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
digamma(nu+0.5) + digamma(nu+1)
c(w) * cbind(dl.dTheta * dTheta.deta,
dl.dnu * dnu.deta)
- }), list( .ltheta=ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
+ }), list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
weight = eval(substitute(expression({
d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
@@ -137,7 +140,7 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
wz[,iam(1,1,M)] = d2l.dTheta2 * dTheta.deta^2
wz[,iam(2,2,M)] = d2l.dnu2 * dnu.deta^2
c(w) * wz
- }), list( .ltheta=ltheta, .lnu=lnu ))))
+ }), list( .ltheta = ltheta, .lnu=lnu ))))
}
@@ -243,59 +246,64 @@ hzeta.control <- function(save.weight = TRUE, ...)
dhzeta = function(x, alpha, log = FALSE)
{
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- if (!is.Numeric(alpha, posit = TRUE))
- stop("'alpha' must be numeric and have positive values")
- nn = max(length(x), length(alpha))
- x = rep(x, len = nn); alpha = rep(alpha, len = nn)
- ox = !is.finite(x)
- zero = ox | round(x) != x | x < 1
- ans = rep(0, len = nn)
- ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) - (2*x[!zero]+1)^(-alpha[!zero])
- if (log.arg) log(ans) else ans
+ if (!is.Numeric(alpha, positive = TRUE))
+ stop("'alpha' must be numeric and have positive values")
+ nn = max(length(x), length(alpha))
+ x = rep(x, length.out = nn); alpha = rep(alpha, length.out = nn)
+ ox = !is.finite(x)
+ zero = ox | round(x) != x | x < 1
+ ans = rep(0, length.out = nn)
+ ans[!zero] = (2*x[!zero]-1)^(-alpha[!zero]) -
+ (2*x[!zero]+1)^(-alpha[!zero])
+ if (log.arg) log(ans) else ans
}
phzeta = function(q, alpha)
{
- if (!is.Numeric(alpha, posit = TRUE))
- stop("'alpha' must be numeric and have positive values")
- nn = max(length(q), length(alpha))
- q = rep(q, len = nn)
- alpha = rep(alpha, len = nn)
- oq = !is.finite(q)
- zero = oq | q < 1
- q = floor(q)
- ans = 0 * q
- ans[!zero] = 1 - (2*q[!zero]+1)^(-alpha[!zero])
- ans
+
+
+ nn = max(length(q), length(alpha))
+ q = rep(q, length.out = nn)
+ alpha = rep(alpha, length.out = nn)
+ oq = !is.finite(q)
+ zero = oq | q < 1
+ q = floor(q)
+ ans = 0 * q
+ ans[!zero] = 1 - (2*q[!zero]+1)^(-alpha[!zero])
+
+ ans[alpha <= 0] = NaN
+
+ ans
}
qhzeta = function(p, alpha)
{
- if (!is.Numeric(alpha, posit = TRUE))
- stop("'alpha' must be numeric and have positive values")
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("argument 'p' must have values inside the interval (0,1)")
- nn = max(length(p), length(alpha))
- p = rep(p, len = nn)
- alpha = rep(alpha, len = nn)
- ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
- floor(ans+1)
+
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("argument 'p' must have values inside the interval (0,1)")
+
+ nn = max(length(p), length(alpha))
+ p = rep(p, length.out = nn)
+ alpha = rep(alpha, length.out = nn)
+ ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
+ ans[alpha <= 0] = NaN
+ floor(ans+1)
}
+
rhzeta = function(n, alpha)
{
- if (!is.Numeric(alpha, posit = TRUE))
- stop("'alpha' must be numeric and have positive values")
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("argument 'n' must be a positive integer")
- ans = ((runif(n)^(-1/alpha) - 1) / 2)
- floor(ans+1)
+
+
+ ans = (runif(n)^(-1/alpha) - 1) / 2
+ ans[alpha <= 0] = NaN
+ floor(ans + 1)
}
@@ -309,9 +317,12 @@ rhzeta = function(n, alpha)
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi <- as.character(substitute(lphi))
+
if (length(zero) &&
- !(is.Numeric(zero, integer = TRUE, posit = TRUE) || is.character(zero )))
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
+ is.character(zero )))
stop("bad input for argument 'zero'")
+
if (!is.Numeric(iphi, positive = TRUE) || max(iphi) >= 1.0)
stop("bad input for argument 'iphi'")
if (!is.list(ephi)) ephi <- list()
@@ -333,7 +344,7 @@ rhzeta = function(n, alpha)
} else
mycmatrix <- if (M == 1) diag(1) else diag(M)
constraints <- cm.vgam(mycmatrix, x, .PARALLEL,
- constraints, int = TRUE)
+ constraints, intercept.apply = TRUE)
constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -355,7 +366,7 @@ rhzeta = function(n, alpha)
prob.init <- colSums(ycount)
prob.init <- prob.init / sum(prob.init)
prob.init <- matrix(prob.init, n, M, byrow = TRUE)
- phi.init <- rep( .iphi, len = n)
+ phi.init <- rep( .iphi, length.out = n)
etastart <- cbind(log(prob.init[,-M]/prob.init[,M]),
theta2eta(phi.init, .lphi, earg = .ephi ))
}
@@ -387,7 +398,7 @@ rhzeta = function(n, alpha)
ycount <- as.matrix(y * c(w))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- ans <- rep(0.0, len = n)
+ ans <- rep(0.0, length.out = n)
omega <- extra$n2
for(jay in 1:M) {
maxyj <- max(ycount[,jay])
@@ -431,7 +442,7 @@ rhzeta = function(n, alpha)
probs <- probs / as.vector(probs %*% rep(1, M))
phi <- eta2theta(eta[,M], .lphi, earg = .ephi )
dl.dprobs <- matrix(0.0, n, M-1)
- dl.dphi <- rep(0.0, len = n)
+ dl.dphi <- rep(0.0, length.out = n)
omega <- extra$n2
ycount <- as.matrix(y * c(w))
for(jay in 1:M) {
@@ -497,7 +508,7 @@ rhzeta = function(n, alpha)
for(iii in 1:n) {
rrr <- 1:omega[iii] # A vector
PHI <- phi[iii]
- pYiM.ge.rrr <- 1 - pbetabinom.ab(q=rrr-1, size=omega[iii],
+ pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[iii],
shape1<-probs[iii,M]*(1/PHI-1),
shape2<-(1-probs[iii,M])*(1/PHI-1)) # A vector
denomM <- ((1-PHI)*probs[iii,M] + (rrr-1)*PHI)^2 # A vector
@@ -506,7 +517,7 @@ rhzeta = function(n, alpha)
sum(1 / (1 + (rrr-2)*PHI)^2)
for(jay in 1:(M-1)) {
denomj <- ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr <- 1 - pbetabinom.ab(q=rrr-1, size=omega[iii],
+ pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[iii],
shape1<-probs[iii,jay]*(1/PHI-1),
shape2<-(1-probs[iii,jay])*(1/PHI-1))
wz[iii,iam(jay,jay,M)] <- wz[iii,iam(jay,jay,M)] +
@@ -527,7 +538,7 @@ rhzeta = function(n, alpha)
for(rrr in 1:maxomega) {
ind5 <- rrr <= omega
PHI <- phi[ind5]
- pYiM.ge.rrr <- 1 - pbetabinom.ab(q=rrr-1, size=omega[ind5],
+ pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[ind5],
shape1<-probs[ind5,M]*(1/PHI-1),
shape2<-(1-probs[ind5,M])*(1/PHI-1))
denomM <- ((1-PHI)*probs[ind5,M] + (rrr-1)*PHI)^2
@@ -536,7 +547,7 @@ rhzeta = function(n, alpha)
1 / (1 + (rrr-2)*PHI)^2
for(jay in 1:(M-1)) {
denomj <- ((1-PHI)*probs[ind5,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr <- 1 - pbetabinom.ab(q=rrr-1, size=omega[ind5],
+ pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[ind5],
shape1<-probs[ind5,jay]*(1/PHI-1),
shape2<-(1-probs[ind5,jay])*(1/PHI-1))
wz[ind5,iam(jay,jay,M)] <- wz[ind5,iam(jay,jay,M)] +
@@ -576,13 +587,18 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
parallel = FALSE, zero = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(init.alpha, posit = TRUE))
- stop("'init.alpha' must contain positive values only")
- if (!is.list(earg)) earg = list()
+ if (mode(link) != "character" && mode(link) != "name")
+ link = as.character(substitute(link))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(init.alpha, positive = TRUE))
+ stop("'init.alpha' must contain positive values only")
+
+ if (!is.list(earg))
+ earg = list()
new("vglmff",
blurb = c("Dirichlet-Multinomial distribution\n\n",
@@ -592,7 +608,7 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
"Posterior mean: (n_j + shape_j)/(2*sum(n_j) + sum(shape_j))\n"),
constraints = eval(substitute(expression({
constraints = cm.vgam(matrix(1, M, 1), x, .parallel,
- constraints, int = TRUE)
+ constraints, intercept.apply = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
@@ -615,7 +631,7 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
linkinv = eval(substitute(function(eta, extra = NULL) {
shape = eta2theta(eta, .link, earg = .earg)
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len = M))
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
(extra$y + shape) / (extra$n2 + sumshape)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
@@ -630,16 +646,16 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
shape = eta2theta(eta, .link, earg = .earg)
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len = M))
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w*(lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
+ sum(w * (lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
sum(w * (lgamma(y + shape) - lgamma(shape )))
}, list( .link = link, .earg = earg ))),
vfamily = c("dirmul.old"),
deriv = eval(substitute(expression({
shape = eta2theta(eta, .link, earg = .earg)
- sumshape = as.vector(shape %*% rep(1, len = M))
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
digamma(y + shape) - digamma(shape)
dsh.deta = dtheta.deta(shape, .link, earg = .earg)
@@ -649,7 +665,7 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
index = iam(NA, NA, M, both = TRUE, diag = TRUE)
wz = matrix(trigamma(sumshape)-trigamma(extra$n2 + sumshape),
nrow=n, ncol=dimm(M))
- wz[,1:M] = wz[,1:M] + trigamma(y + shape) - trigamma(shape)
+ wz[, 1:M] = wz[, 1:M] + trigamma(y + shape) - trigamma(shape)
wz = -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
@@ -672,16 +688,21 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
rdiric = function(n, shape, dimension = NULL) {
- if (!is.numeric(dimension))
- dimension = length(shape)
- shape = rep(shape, len=dimension)
- ans = rgamma(n*dimension, rep(shape, rep(n, dimension)))
- dim(ans) = c(n, dimension)
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (!is.numeric(dimension))
+ dimension = length(shape)
+ shape = rep(shape, length.out = dimension)
+ ans = rgamma(use.n * dimension, rep(shape, rep(use.n, dimension)))
+ dim(ans) = c(use.n, dimension)
- ans = ans / rowSums(ans)
- ans
+
+ ans = ans / rowSums(ans)
+ ans
}
@@ -693,7 +714,7 @@ rdiric = function(n, shape, dimension = NULL) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (length(zero) &&
- !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.list(earg)) earg = list()
@@ -727,7 +748,7 @@ rdiric = function(n, shape, dimension = NULL) {
last = eval(substitute(expression({
misc$link = c(shape = .link)
temp.names = paste("shape", 1:M, sep = "")
- misc$link = rep( .link, len = M)
+ misc$link = rep( .link, length.out = M)
names(misc$link) = temp.names
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -738,7 +759,7 @@ rdiric = function(n, shape, dimension = NULL) {
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
shape = eta2theta(eta, .link, earg = .earg )
M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, len = M))
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(c(w) * lgamma(sumshape)) -
@@ -749,7 +770,7 @@ rdiric = function(n, shape, dimension = NULL) {
vfamily = c("dirichlet"),
deriv = eval(substitute(expression({
shape = eta2theta(eta, .link, earg = .earg )
- sumshape = as.vector(shape %*% rep(1, len = M))
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
dl.dsh = digamma(sumshape) - digamma(shape) + log(y)
dsh.deta = dtheta.deta(shape, .link, earg = .earg )
c(w) * dl.dsh * dsh.deta
@@ -757,7 +778,7 @@ rdiric = function(n, shape, dimension = NULL) {
weight = expression({
index = iam(NA, NA, M, both = TRUE, diag = TRUE)
wz = matrix(trigamma(sumshape), nrow=n, ncol=dimm(M))
- wz[,1:M] = wz[,1:M] - trigamma(shape)
+ wz[, 1:M] = wz[, 1:M] - trigamma(shape)
wz = -c(w) * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
wz
}))
@@ -772,7 +793,7 @@ rdiric = function(n, shape, dimension = NULL) {
deriv.arg = deriv
rm(deriv)
- if (!is.Numeric(deriv.arg, allow = 1, integer = TRUE))
+ if (!is.Numeric(deriv.arg, allowable.length = 1, integer.valued = TRUE))
stop("'deriv' must be a single non-negative integer")
if (deriv.arg < 0 || deriv.arg > 2)
stop("'deriv' must be 0, 1, or 2")
@@ -830,7 +851,7 @@ rdiric = function(n, shape, dimension = NULL) {
{
- if (!is.Numeric(deriv.arg, allow = 1, integer = TRUE))
+ if (!is.Numeric(deriv.arg, allowable.length = 1, integer.valued = TRUE))
stop("'deriv.arg' must be a single non-negative integer")
if (deriv.arg < 0 || deriv.arg > 2)
stop("'deriv.arg' must be 0, 1, or 2")
@@ -863,15 +884,15 @@ dzeta = function(x, p, log = FALSE)
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(p, posit = TRUE)) # || min(p) <= 1
+ if (!is.Numeric(p, positive = TRUE)) # || min(p) <= 1
stop("'p' must be numeric and > 0")
LLL = max(length(p), length(x))
- x = rep(x, len = LLL); p = rep(p, len = LLL)
+ x = rep(x, length.out = LLL); p = rep(p, length.out = LLL)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
if (any(zero)) warning("non-integer x and/or x < 1 or NAs")
- ans = rep(if (log.arg) log(0) else 0, len = LLL)
+ ans = rep(if (log.arg) log(0) else 0, length.out = LLL)
if (any(!zero)) {
if (log.arg) {
ans[!zero] = (-p[!zero]-1)*log(x[!zero]) - log(zeta(p[!zero]+1))
@@ -886,10 +907,12 @@ dzeta = function(x, p, log = FALSE)
zetaff = function(link = "loge", earg = list(), init.p = NULL)
{
- if (length(init.p) && !is.Numeric(init.p, positi = TRUE))
+ if (length(init.p) && !is.Numeric(init.p, positive = TRUE))
stop("argument 'init.p' must be > 0")
+
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
+
if (!is.list(earg)) earg = list()
new("vglmff",
@@ -912,12 +935,13 @@ dzeta = function(x, p, log = FALSE)
if (!length(etastart)) {
zetaff.Loglikfun = function(pp, y, x, w, extraargs) {
- sum(w * dzeta(x = y, p=pp, log = TRUE))
+ sum(w * dzeta(x = y, p = pp, log = TRUE))
}
- p.grid = seq(0.1, 3.0, len=19)
+ p.grid = seq(0.1, 3.0, length.out = 19)
pp.init = if (length( .init.p )) .init.p else
- getMaxMin(p.grid, objfun=zetaff.Loglikfun, y = y, x = x, w = w)
- pp.init = rep(pp.init, length=length(y))
+ getMaxMin(p.grid, objfun = zetaff.Loglikfun,
+ y = y, x = x, w = w)
+ pp.init = rep(pp.init, length = length(y))
if ( .link == "loglog") pp.init[pp.init <= 1] = 1.2
etastart = theta2eta(pp.init, .link, earg = .earg)
}
@@ -937,7 +961,7 @@ dzeta = function(x, p, log = FALSE)
pp = eta2theta(eta, .link, earg = .earg)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzeta(x = y, p=pp, log = TRUE))
+ sum(w * dzeta(x = y, p = pp, log = TRUE))
}
}, list( .link = link, .earg = earg ))),
vfamily = c("zetaff"),
@@ -958,19 +982,19 @@ dzeta = function(x, p, log = FALSE)
-gharmonic = function(n, s = 1, lognexponent=0) {
+gharmonic = function(n, s = 1, lognexponent = 0) {
- if (!is.Numeric(n, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
- if (!is.Numeric(lognexponent, allow = 1))
+ if (!is.Numeric(lognexponent, allowable.length = 1))
stop("bad input for argument 'lognexponent'")
if (length(n) == 1 && length(s) == 1) {
if (lognexponent != 0) sum(log(1:n)^lognexponent * (1:n)^(-s)) else
sum((1:n)^(-s))
} else {
LEN = max(length(n), length(s))
- n = rep(n, len = LEN)
- ans = s = rep(s, len = LEN)
+ n = rep(n, length.out = LEN)
+ ans = s = rep(s, length.out = LEN)
if (lognexponent != 0) {
for(ii in 1:LEN)
ans[ii] = sum(log(1:n[ii])^lognexponent * (1:n[ii])^(-s[ii]))
@@ -989,12 +1013,12 @@ dzipf = function(x, N, s, log = FALSE)
if (!is.Numeric(x))
stop("bad input for argument 'x'")
- if (!is.Numeric(N, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'N'")
- if (!is.Numeric(s, posit = TRUE))
+ if (!is.Numeric(s, positive = TRUE))
stop("bad input for argument 's'")
nn = max(length(x), length(N), length(s))
- x = rep(x, len = nn); N = rep(N, len = nn); s = rep(s, len = nn);
+ x = rep(x, length.out = nn); N = rep(N, length.out = nn); s = rep(s, length.out = nn);
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1 | x > N
ans = (if (log.arg) log(0) else 0) * x
@@ -1013,13 +1037,13 @@ dzipf = function(x, N, s, log = FALSE)
pzipf = function(q, N, s) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(N, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'N'")
- if (!is.Numeric(s, posit = TRUE))
+ if (!is.Numeric(s, positive = TRUE))
stop("bad input for argument 's'")
nn = max(length(q), length(N), length(s))
- q = rep(q, len = nn); N = rep(N, len = nn); s = rep(s, len = nn);
+ q = rep(q, length.out = nn); N = rep(N, length.out = nn); s = rep(s, length.out = nn);
oq = !is.finite(q)
zeroOR1 = oq | q < 1 | q >= N
floorq = floor(q)
@@ -1035,10 +1059,10 @@ pzipf = function(q, N, s) {
zipf = function(N = NULL, link = "loge", earg = list(), init.s = NULL)
{
if (length(N) &&
- (!is.Numeric(N, positi = TRUE, integ = TRUE, allow = 1) || N <= 1))
+ (!is.Numeric(N, positive = TRUE, integer.valued = TRUE, allowable.length = 1) || N <= 1))
stop("bad input for argument 'N'")
enteredN = length(N)
- if (length(init.s) && !is.Numeric(init.s, positi = TRUE))
+ if (length(init.s) && !is.Numeric(init.s, positive = TRUE))
stop("argument 'init.s' must be > 0")
if (mode(link) != "character" && mode(link) != "name")
@@ -1061,7 +1085,7 @@ pzipf = function(q, N, s) {
stop("y must be integer-valued")
predictors.names = namesof("s", .link, earg = .earg, tag = FALSE)
NN = .N
- if (!is.Numeric(NN, allow = 1, posit = TRUE, integ = TRUE))
+ if (!is.Numeric(NN, allowable.length = 1, positive = TRUE, integer.valued = TRUE))
NN = max(y)
if (max(y) > NN)
stop("maximum of the response is greater than argument 'N'")
@@ -1073,7 +1097,8 @@ pzipf = function(q, N, s) {
sum(w * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
}
ss.init = if (length( .init.s )) .init.s else
- getInitVals(gvals=seq(0.1, 3.0, len=19), llfun=llfun,
+ getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
+ llfun=llfun,
y = y, N=extra$N, w = w)
ss.init = rep(ss.init, length=length(y))
if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
@@ -1132,19 +1157,22 @@ cauchy.control <- function(save.weight = TRUE, ...)
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allow = 1, integ = TRUE) || nsimEIM <= 50))
+ (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+ nsimEIM <= 50))
stop("argument 'nsimEIM' should be an integer greater than 50")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.Numeric(iprobs, posit = TRUE) || max(iprobs) >= 1)
+ if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1)
stop("bad input for argument 'iprobs'")
new("vglmff",
@@ -1178,13 +1206,13 @@ cauchy.control <- function(save.weight = TRUE, ...)
sum(w * dcauchy(x = y, loc=loc, scale=scal, log = TRUE))
}
loc.grid = c(quantile(y, probs=seq(0.1, 0.9, by=0.05)))
- try.this = getMaxMin(loc.grid, objfun=cauchy2.Loglikfun,
+ try.this = getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
y = y, x = x, w = w)
- try.this = rep(c(try.this), len = n)
+ try.this = rep(c(try.this), length.out = n)
try.this
}
}
- loc.init = rep(c(loc.init), len = n)
+ loc.init = rep(c(loc.init), length.out = n)
sca.init = if (length( .iscale)) .iscale else {
@@ -1197,7 +1225,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
sca.init
}
- sca.init = rep(c(sca.init), len = n)
+ sca.init = rep(c(sca.init), length.out = n)
if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
theta2eta(sca.init, .lscale, earg = .escale))
@@ -1206,7 +1234,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
.iscale = iscale, .escale = escale, .lscale = lscale,
.iprobs=iprobs, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocation, earg = .elocation)
+ eta2theta(eta[, 1], .llocation, earg = .elocation)
}, list( .llocation = llocation,
.elocation = elocation ))),
last = eval(substitute(expression({
@@ -1219,8 +1247,8 @@ cauchy.control <- function(save.weight = TRUE, ...)
.llocation = llocation, .lscale = lscale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- myscale = eta2theta(eta[,2], .lscale, earg = .escale)
+ location = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ myscale = eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dcauchy(x = y, loc=location, sc=myscale, log = TRUE))
@@ -1229,8 +1257,8 @@ cauchy.control <- function(save.weight = TRUE, ...)
.elocation = elocation, .llocation = llocation ))),
vfamily = c("cauchy"),
deriv = eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- myscale = eta2theta(eta[,2], .lscale, earg = .escale)
+ location = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ myscale = eta2theta(eta[, 2], .lscale, earg = .escale)
dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
dscale.deta = dtheta.deta(myscale, .lscale, earg = .escale)
Z = (y-location) / myscale
@@ -1246,7 +1274,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
dthetas.detas = cbind(dlocation.deta, dscale.deta)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rcauchy(n, loc=location, scale=myscale)
+ ysim = rcauchy(n, loc = location, scale = myscale)
Z = (ysim-location) / myscale
dl.dlocation = 2 * Z / ((1 + Z^2) * myscale)
dl.dscale = (Z^2 - 1) / ((1 + Z^2) * myscale)
@@ -1264,7 +1292,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
} else {
wz = cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz = c(w) * wz[,1:M] # diagonal wz
+ wz = c(w) * wz[, 1:M] # diagonal wz
}
wz
@@ -1284,8 +1312,8 @@ cauchy.control <- function(save.weight = TRUE, ...)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, posit = TRUE)) stop("bad input for 'scale.arg'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(scale.arg, positive = TRUE)) stop("bad input for 'scale.arg'")
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
if (!is.list(elocation)) elocation = list()
@@ -1309,16 +1337,20 @@ cauchy.control <- function(save.weight = TRUE, ...)
if ( .imethod == 3) y else {
cauchy1.Loglikfun = function(loc, y, x, w, extraargs) {
scal = extraargs
- sum(w * dcauchy(x = y, loc=loc, scale=scal, log = TRUE))
+ sum(w * dcauchy(x = y, loc = loc, scale = scal,
+ log = TRUE))
}
- loc.grid = quantile(y, probs=seq(0.1, 0.9, by=0.05))
- try.this = getMaxMin(loc.grid, objfun=cauchy1.Loglikfun,
- y = y, x = x, w = w, extraargs= .scale.arg)
- try.this = rep(try.this, len = n)
+ loc.grid = quantile(y, probs = seq(0.1, 0.9,
+ by = 0.05))
+ try.this = getMaxMin(loc.grid,
+ objfun = cauchy1.Loglikfun,
+ y = y, x = x, w = w,
+ extraargs = .scale.arg )
+ try.this = rep(try.this, length.out = n)
try.this
}
}
- loc.init = rep(loc.init, len = n)
+ loc.init = rep(loc.init, length.out = n)
if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
etastart = theta2eta(loc.init, .llocation, earg = .elocation)
}
@@ -1372,9 +1404,9 @@ cauchy.control <- function(save.weight = TRUE, ...)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, allow = 1, posit = TRUE))
+ if (!is.Numeric(scale.arg, allowable.length = 1, positive = TRUE))
stop("'scale.arg' must be a single positive number")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
if (!is.list(elocation)) elocation = list()
@@ -1391,7 +1423,7 @@ cauchy.control <- function(save.weight = TRUE, ...)
earg = .elocation, tag = FALSE)
if (!length(etastart)) {
location.init = if ( .imethod == 1) y else median(rep(y, w))
- location.init = rep(location.init, len = n)
+ location.init = rep(location.init, length.out = n)
if ( .llocation == "loge") location.init = abs(location.init) + 0.001
etastart = theta2eta(location.init, .llocation, earg = .elocation)
}
@@ -1440,9 +1472,11 @@ cauchy.control <- function(save.weight = TRUE, ...)
erlang = function(shape.arg, link = "loge", earg = list(), imethod = 1)
{
- if (!is.Numeric(shape.arg, allow = 1, integer = TRUE, positi = TRUE))
+ if (!is.Numeric(shape.arg, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("'shape' must be a positive integer")
- if (!is.Numeric(imethod, allow = 1, integer = TRUE, positi = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -1518,15 +1552,15 @@ dbort = function(x, Qsize = 1, a=0.5, log = FALSE) {
rm(log)
if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(Qsize, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'Qsize'")
- if (!is.Numeric(a, posit = TRUE) || max(a) >= 1)
+ if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
stop("bad input for argument 'a'")
N = max(length(x), length(Qsize), length(a))
- x = rep(x, len = N); Qsize = rep(Qsize, len = N); a = rep(a, len = N);
+ x = rep(x, length.out = N); Qsize = rep(Qsize, length.out = N); a = rep(a, length.out = N);
xok = (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
- ans = rep(if (log.arg) log(0) else 0, len = N) # loglikelihood
+ ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
ans[xok] = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
(x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
(x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
@@ -1537,36 +1571,40 @@ dbort = function(x, Qsize = 1, a=0.5, log = FALSE) {
}
-rbort = function(n, Qsize = 1, a=0.5) {
- if (!is.Numeric(n, integ = TRUE, posit = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
- stop("bad input for argument 'Qsize'")
- if (!is.Numeric(a, posit = TRUE) || max(a) >= 1)
- stop("bad input for argument 'a'")
- N = n
- qsize = rep(Qsize, len = N); a = rep(a, len = N)
- totqsize = qsize
- fini = (qsize < 1)
- while(any(!fini)) {
- additions = rpois(sum(!fini), a[!fini])
- qsize[!fini] = qsize[!fini] + additions
- totqsize[!fini] = totqsize[!fini] + additions
- qsize = qsize - 1
- fini = fini | (qsize < 1)
- }
- totqsize
+rbort = function(n, Qsize = 1, a = 0.5) {
+
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ if (!is.Numeric(Qsize, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'Qsize'")
+ if (!is.Numeric(a, positive = TRUE) ||
+ max(a) >= 1)
+ stop("bad input for argument 'a'")
+
+ N = use.n
+ qsize = rep(Qsize, length.out = N); a = rep(a, length.out = N)
+ totqsize = qsize
+ fini = (qsize < 1)
+ while(any(!fini)) {
+ additions = rpois(sum(!fini), a[!fini])
+ qsize[!fini] = qsize[!fini] + additions
+ totqsize[!fini] = totqsize[!fini] + additions
+ qsize = qsize - 1
+ fini = fini | (qsize < 1)
+ }
+ totqsize
}
borel.tanner = function(Qsize = 1, link = "logit", earg = list(), imethod = 1)
{
- if (!is.Numeric(Qsize, allow = 1, integ = TRUE, posit = TRUE))
+ if (!is.Numeric(Qsize, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'Qsize'")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1 or 2, 3 or 4")
@@ -1590,9 +1628,9 @@ rbort = function(n, Qsize = 1, a=0.5) {
if (!length(etastart)) {
a.init = switch(as.character( .imethod ),
"1" = 1 - .Qsize / (y+1/8),
- "2" = rep(1 - .Qsize / weighted.mean(y, w), len = n),
- "3" = rep(1 - .Qsize / median(y), len = n),
- "4" = rep(0.5, len = n))
+ "2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
+ "3" = rep(1 - .Qsize / median(y), length.out = n),
+ "4" = rep(0.5, length.out = n))
etastart = theta2eta(a.init, .link, earg = .earg)
}
}), list( .link = link, .earg = earg, .Qsize=Qsize,
@@ -1631,18 +1669,18 @@ rbort = function(n, Qsize = 1, a=0.5) {
-dfelix = function(x, a=0.25, log = FALSE) {
+dfelix = function(x, a = 0.25, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(a, posit = TRUE)) stop("bad input for argument 'a'")
+ if (!is.Numeric(a, positive = TRUE)) stop("bad input for argument 'a'")
N = max(length(x), length(a))
- x = rep(x, len = N); a = rep(a, len = N);
+ x = rep(x, length.out = N); a = rep(a, length.out = N);
xok = (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
- ans = rep(if (log.arg) log(0) else 0, len = N) # loglikelihood
+ ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
ans[xok] = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
if (!log.arg) {
@@ -1654,13 +1692,13 @@ dfelix = function(x, a=0.25, log = FALSE) {
felix = function(link = "elogit",
- earg=if (link == "elogit") list(min=0, max=0.5) else list(),
+ earg=if (link == "elogit") list(min = 0, max = 0.5) else list(),
imethod = 1)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
stop("argument 'imethod' must be 1 or 2, 3 or 4")
@@ -1681,9 +1719,9 @@ dfelix = function(x, a=0.25, log = FALSE) {
wymean = weighted.mean(y, w)
a.init = switch(as.character( .imethod ),
"1" = (y-1+1/8) / (2*(y+1/8)+1/8),
- "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8), len = n),
- "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), len = n),
- "4" = rep(0.25, len = n))
+ "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8), length.out = n),
+ "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8), length.out = n),
+ "4" = rep(0.25, length.out = n))
etastart = theta2eta(a.init, .link, earg = .earg)
}
}), list( .link = link, .earg = earg,
@@ -1725,11 +1763,11 @@ dfelix = function(x, a=0.25, log = FALSE) {
betaff = function(A=0, B = 1,
lmu = if (A == 0 & B == 1) "logit" else "elogit", lphi = "loge",
- emu = if (lmu == "elogit") list(min=A, max=B) else list(),
+ emu = if (lmu == "elogit") list(min = A, max = B) else list(),
ephi = list(),
imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
{
- if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
+ if (!is.Numeric(A, allowable.length = 1) || !is.Numeric(B, allowable.length = 1) || A >= B)
stop("A must be < B, and both must be of length one")
stdbeta = (A == 0 && B == 1)
@@ -1737,14 +1775,16 @@ dfelix = function(x, a=0.25, log = FALSE) {
lmu = as.character(substitute(lmu))
if (mode(lphi) != "character" && mode(lphi) != "name")
lphi = as.character(substitute(lphi))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(imu) && (!is.Numeric(imu, posit = TRUE) ||
+ if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
any(imu <= A) || any(imu >= B)))
stop("bad input for argument 'imu'")
- if (length(iphi) && !is.Numeric(iphi, posit = TRUE))
+ if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
stop("bad input for argument 'iphi'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -1782,13 +1822,13 @@ dfelix = function(x, a=0.25, log = FALSE) {
phi.init = if (is.Numeric( .iphi)) .iphi else
max(0.01, -1 + ( .B-.A)^2 * mu1.init*(1-mu1.init)/var(y))
etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta(mu.init, .lmu, earg = .emu )
- etastart[,2] = theta2eta(phi.init, .lphi, earg = .ephi )
+ etastart[, 1] = theta2eta(mu.init, .lmu, earg = .emu )
+ etastart[, 2] = theta2eta(phi.init, .lphi, earg = .ephi )
}
}), list( .lmu = lmu, .lphi = lphi, .imu=imu, .iphi=iphi,
.A = A, .B = B, .emu = emu, .ephi = ephi, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta[,1], .lmu, .emu )
+ mu = eta2theta(eta[, 1], .lmu, .emu )
mu
}, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
last = eval(substitute(expression({
@@ -1800,23 +1840,23 @@ dfelix = function(x, a=0.25, log = FALSE) {
.stdbeta = stdbeta ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- mu = eta2theta(eta[,1], .lmu, .emu )
+ mu = eta2theta(eta[, 1], .lmu, .emu )
m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- phi = eta2theta(eta[,2], .lphi, .ephi )
+ phi = eta2theta(eta[, 2], .lphi, .ephi )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
shape1 = phi * m1u
shape2 = (1 - m1u) * phi
zedd = (y - .A) / ( .B - .A)
- sum(w * (dbeta(x=zedd, shape1=shape1, shape2=shape2, log = TRUE) -
+ sum(w * (dbeta(x=zedd, shape1 = shape1, shape2 = shape2, log = TRUE) -
log( abs( .B - .A ))))
}
}, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
vfamily = "betaff",
deriv = eval(substitute(expression({
- mu = eta2theta(eta[,1], .lmu, .emu )
- phi = eta2theta(eta[,2], .lphi, .ephi )
+ mu = eta2theta(eta[, 1], .lmu, .emu )
+ phi = eta2theta(eta[, 2], .lphi, .ephi )
m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
dmu.deta = dtheta.deta(mu, .lmu, .emu )
dmu1.dmu = 1 / ( .B - .A)
@@ -1866,16 +1906,21 @@ dfelix = function(x, a=0.25, log = FALSE) {
lshape1 = as.character(substitute(lshape1))
if (mode(lshape2) != "character" && mode(lshape2) != "name")
lshape2 = as.character(substitute(lshape2))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length( i1 ) && !is.Numeric( i1, posit = TRUE))
+ if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
stop("bad input for argument 'i1'")
- if (length( i2 ) && !is.Numeric( i2, posit = TRUE))
+ if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
stop("bad input for argument 'i2'")
- if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
- stop("A must be < B, and both must be of length one")
- stdbeta = (A == 0 && B == 1) # stdbeta == T iff standard beta distribution
+ if (!is.Numeric(A, allowable.length = 1) ||
+ !is.Numeric(B, allowable.length = 1) ||
+ A >= B)
+ stop("A must be < B, and both must be of length one")
+
+ stdbeta = (A == 0 && B == 1) # stdbeta == T iff standard beta distn
+
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
@@ -1912,20 +1957,20 @@ dfelix = function(x, a=0.25, log = FALSE) {
pinit = max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
qinit = max(0.01, pinit * (1 - uu) / uu)
etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta( pinit, .lshape1, earg = .eshape1 )
- etastart[,2] = theta2eta( qinit, .lshape2, earg = .eshape2 )
+ etastart[, 1] = theta2eta( pinit, .lshape1, earg = .eshape1 )
+ etastart[, 2] = theta2eta( qinit, .lshape2, earg = .eshape2 )
}
if (is.Numeric( .i1 ))
- etastart[,1] = theta2eta( .i1, .lshape1, earg = .eshape1 )
+ etastart[, 1] = theta2eta( .i1, .lshape1, earg = .eshape1 )
if (is.Numeric( .i2 ))
- etastart[,2] = theta2eta( .i2, .lshape2, earg = .eshape2 )
+ etastart[, 2] = theta2eta( .i2, .lshape2, earg = .eshape2 )
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
- .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
+ .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
@@ -1936,37 +1981,37 @@ dfelix = function(x, a=0.25, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2 ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
+ shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
zedd = (y - .A) / ( .B - .A)
- sum(w * (dbeta(x=zedd, shape1=shapes[,1], shape2=shapes[,2],
+ sum(w * (dbeta(x=zedd, shape1 = shapes[, 1], shape2 = shapes[, 2],
log = TRUE) - log( abs( .B - .A ))))
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = "beta.ab",
deriv = eval(substitute(expression({
- shapes = cbind(eta2theta(eta[,1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[,2], .lshape2, earg = .eshape2 ))
- dshapes.deta = cbind(dtheta.deta(shapes[,1], .lshape1, earg = .eshape1),
- dtheta.deta(shapes[,2], .lshape2, earg = .eshape2))
+ shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
+ dshapes.deta = cbind(dtheta.deta(shapes[, 1], .lshape1, earg = .eshape1),
+ dtheta.deta(shapes[, 2], .lshape2, earg = .eshape2))
dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
+ digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
c(w) * dl.dshapes * dshapes.deta
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = expression({
- temp2 = trigamma(shapes[,1]+shapes[,2])
- d2l.dshape12 = temp2 - trigamma(shapes[,1])
- d2l.dshape22 = temp2 - trigamma(shapes[,2])
+ temp2 = trigamma(shapes[, 1]+shapes[, 2])
+ d2l.dshape12 = temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 = temp2 - trigamma(shapes[, 2])
d2l.dshape1shape2 = temp2
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[, 1] * dshapes.deta[, 2]
-c(w) * wz
}))
@@ -2005,17 +2050,17 @@ dfelix = function(x, a=0.25, log = FALSE) {
namesof("shape2", .link, earg = .earg, short = TRUE), "A", "B")
my.range = diff(range(y))
if (!length(etastart)) {
- etastart = cbind(shape1= rep( .i1, len = length(y)),
+ etastart = cbind(shape1= rep( .i1, length.out = length(y)),
shape2= .i2,
A = if (length( .iA)) .iA else min(y)-my.range/70,
B = if (length( .iB)) .iB else max(y)+my.range/70)
}
}), list( .i1=i1, .i2=i2, .iA=iA, .iB=iB, .link = link, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta[,1:2], .link, earg = .earg)
- .A = eta[,3]
- .B = eta[,4]
- .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ shapes = eta2theta(eta[, 1:2], .link, earg = .earg)
+ .A = eta[, 3]
+ .B = eta[, 4]
+ .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c(shape1 = .link, shape2 = .link,
@@ -2025,58 +2070,58 @@ dfelix = function(x, a=0.25, log = FALSE) {
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shapes = eta2theta(eta[,1:2], .link, earg = .earg)
- .A = eta[,3]
- .B = eta[,4]
- temp = lbeta(shapes[,1], shapes[,2])
+ shapes = eta2theta(eta[, 1:2], .link, earg = .earg)
+ .A = eta[, 3]
+ .B = eta[, 4]
+ temp = lbeta(shapes[, 1], shapes[, 2])
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log( .B-y) - temp -
- (shapes[,1]+shapes[,2]-1)*log( .B-.A )))
+ sum(w * ((shapes[, 1]-1)*log(y-.A) + (shapes[, 2]-1)*log( .B-y) - temp -
+ (shapes[, 1]+shapes[, 2]-1)*log( .B-.A )))
}, list( .link = link, .earg = earg ))),
vfamily = "beta4",
deriv = eval(substitute(expression({
- shapes = eta2theta(eta[,1:2], .link, earg = .earg)
- .A = eta[,3]
- .B = eta[,4]
+ shapes = eta2theta(eta[, 1:2], .link, earg = .earg)
+ .A = eta[, 3]
+ .B = eta[, 4]
dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
rr1 = ( .B - .A)
- temp3 = (shapes[,1] + shapes[,2] - 1)
+ temp3 = (shapes[, 1] + shapes[, 2] - 1)
temp1 = temp3 / rr1
dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
- dl.dA = -(shapes[,1]-1) / (y- .A) + temp1
- dl.dB = (shapes[,2]-1) / ( .B - y) - temp1
+ digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
+ dl.dA = -(shapes[, 1]-1) / (y- .A) + temp1
+ dl.dB = (shapes[, 2]-1) / ( .B - y) - temp1
c(w) * cbind(dl.dshapes * dshapes.deta, dl.dA, dl.dB)
}), list( .link = link, .earg = earg ))),
weight = expression({
- temp2 = trigamma(shapes[,1]+shapes[,2])
- d2l.dshape12 = temp2 - trigamma(shapes[,1])
- d2l.dshape22 = temp2 - trigamma(shapes[,2])
+ temp2 = trigamma(shapes[, 1]+shapes[, 2])
+ d2l.dshape12 = temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 = temp2 - trigamma(shapes[, 2])
d2l.dshape1shape2 = temp2
- ed2l.dAA = -temp3 * shapes[,2] / ((shapes[,1]-2) * rr1^2)
- ed2l.dBB = -temp3 * shapes[,1] / ((shapes[,2]-2) * rr1^2)
+ ed2l.dAA = -temp3 * shapes[, 2] / ((shapes[, 1]-2) * rr1^2)
+ ed2l.dBB = -temp3 * shapes[, 1] / ((shapes[, 2]-2) * rr1^2)
ed2l.dAB = -temp3 / (rr1^2)
- ed2l.dAshape1 = -shapes[,2] / ((shapes[,1]-1) * rr1)
+ ed2l.dAshape1 = -shapes[, 2] / ((shapes[, 1]-1) * rr1)
ed2l.dAshape2 = 1/rr1
ed2l.dBshape1 = -1/rr1
- ed2l.dBshape2 = shapes[,1] / ((shapes[,2]-1) * rr1)
+ ed2l.dBshape2 = shapes[, 1] / ((shapes[, 2]-1) * rr1)
wz = matrix(as.numeric(NA), n, dimm(M)) #10=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[, 1] * dshapes.deta[, 2]
wz[,iam(3,3,M)] = ed2l.dAA
wz[,iam(4,4,M)] = ed2l.dBB
wz[,iam(4,3,M)] = ed2l.dAB
- wz[,iam(3,1,M)] = ed2l.dAshape1 * dshapes.deta[,1]
- wz[,iam(3,2,M)] = ed2l.dAshape2 * dshapes.deta[,2]
- wz[,iam(4,1,M)] = ed2l.dBshape1 * dshapes.deta[,1]
- wz[,iam(4,2,M)] = ed2l.dBshape2 * dshapes.deta[,2]
+ wz[,iam(3,1,M)] = ed2l.dAshape1 * dshapes.deta[, 1]
+ wz[,iam(3,2,M)] = ed2l.dAshape2 * dshapes.deta[, 2]
+ wz[,iam(4,1,M)] = ed2l.dBshape1 * dshapes.deta[, 1]
+ wz[,iam(4,2,M)] = ed2l.dBshape2 * dshapes.deta[, 2]
-c(w) * wz
@@ -2123,7 +2168,7 @@ dfelix = function(x, a=0.25, log = FALSE) {
exponential <- function(link = "loge", earg = list(),
location = 0, expected = TRUE) {
- if (!is.Numeric(location, allow = 1))
+ if (!is.Numeric(location, allowable.length = 1))
stop("bad input for argument 'location'")
if (mode(link) != "character" && mode(link) != "name")
@@ -2267,14 +2312,16 @@ dfelix = function(x, a=0.25, log = FALSE) {
lrate = as.character(substitute(lrate))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length( irate) && !is.Numeric(irate, posit = TRUE))
+ if (length( irate) && !is.Numeric(irate, positive = TRUE))
stop("bad input for argument 'irate'")
- if (length( ishape) && !is.Numeric(ishape, posit = TRUE))
+ if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
+
if (!is.list(erate)) erate = list()
if (!is.list(eshape)) eshape = list()
@@ -2303,8 +2350,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
var.y.est = sum(w * junk$resid^2) / (nrow(x) - length(junk$coef))
init.shape = if (length( .ishape)) .ishape else mymu^2 / var.y.est
init.rate = if (length( .irate)) .irate else init.shape / mymu
- init.rate = rep(init.rate, len = n)
- init.shape = rep(init.shape, len = n)
+ init.rate = rep(init.rate, length.out = n)
+ init.shape = rep(init.shape, length.out = n)
if ( .lshape == "loglog")
init.shape[init.shape <= 1] = 3.1 #Hopefully value is big enough
etastart = cbind(theta2eta(init.rate, .lrate, earg = .erate),
@@ -2313,7 +2360,7 @@ dfelix = function(x, a=0.25, log = FALSE) {
}), list( .lrate = lrate, .lshape = lshape, .irate=irate, .ishape = ishape,
.erate = erate, .eshape = eshape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,2], .lshape, earg = .eshape) / eta2theta(eta[,1], .lrate,
+ eta2theta(eta[, 2], .lshape, earg = .eshape) / eta2theta(eta[, 1], .lrate,
earg = .erate)
}, list( .lrate = lrate, .lshape = lshape,
.erate = erate, .eshape = eshape ))),
@@ -2324,8 +2371,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
.erate = erate, .eshape = eshape ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rate = eta2theta(eta[,1], .lrate, earg = .erate)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
+ rate = eta2theta(eta[, 1], .lrate, earg = .erate)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dgamma(x = y, shape = shape, rate=rate, log = TRUE))
@@ -2334,8 +2381,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
.erate = erate, .eshape = eshape ))),
vfamily = c("gamma2.ab"),
deriv = eval(substitute(expression({
- rate = eta2theta(eta[,1], .lrate, earg = .erate)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
+ rate = eta2theta(eta[, 1], .lrate, earg = .erate)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
dl.drate = mu - y
dl.dshape = log(y*rate) - digamma(shape)
dratedeta = dtheta.deta(rate, .lrate, earg = .erate)
@@ -2375,13 +2422,15 @@ dfelix = function(x, a=0.25, log = FALSE) {
lmu = as.character(substitute(lmu))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer = TRUE))
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
- if (length( ishape) && !is.Numeric(ishape, posit = TRUE))
+ if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
+
if (!is.list(emu)) emu = list()
if (!is.list(eshape)) eshape = list()
@@ -2407,8 +2456,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
envir = VGAM:::VGAMenv)
if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allow = 1) && .zero != -2)
- stop("argument zero=-2 is required")
+ is.Numeric( .zero, allowable.length = 1) && .zero != -2)
+ stop("argument zero = -2 is required")
y = as.matrix(y)
M = Musual * ncol(y)
@@ -2483,8 +2532,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual <- 2
NOS = ncol(eta) / Musual
- mymu = mu # eta2theta(eta[,2*(1:NOS)-1], .lmu, earg = .emu )
- shapemat = eta2theta(eta[,2*(1:NOS), drop = FALSE], .lshape, earg = .eshape )
+ mymu = mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu, earg = .emu )
+ shapemat = eta2theta(eta[, 2*(1:NOS), drop = FALSE], .lshape, earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dgamma(x = y, shape = c(shapemat), scale = c(mymu/shapemat),
@@ -2497,8 +2546,8 @@ dfelix = function(x, a=0.25, log = FALSE) {
Musual <- 2
NOS = ncol(eta) / Musual
- mymu = eta2theta(eta[,2*(1:NOS)-1], .lmu, earg = .emu )
- shape = eta2theta(eta[,2*(1:NOS)], .lshape, earg = .eshape )
+ mymu = eta2theta(eta[, 2*(1:NOS)-1], .lmu, earg = .emu )
+ shape = eta2theta(eta[, 2*(1:NOS)], .lshape, earg = .eshape )
dl.dmu = shape * (y / mymu - 1) / mymu
dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
@@ -2517,20 +2566,15 @@ dfelix = function(x, a=0.25, log = FALSE) {
ed2l.dshape2 = trigamma(shape) - 1 / shape
wz = matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
- wz[,2*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
- wz[,2*(1:NOS)] = ed2l.dshape2 * dshape.deta^2
+ wz[, 2*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
+ wz[, 2*(1:NOS)] = ed2l.dshape2 * dshape.deta^2
c(w) * wz
}), list( .lmu = lmu ))))
if (deviance.arg) ans at deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS = ncol(eta) / 2
- temp300 = eta[,2*(1:NOS), drop = FALSE]
- if ( .lshape == "loge") {
- bigval = 28
- temp300[temp300 > bigval] = bigval
- temp300[temp300 < -bigval] = -bigval
- } else stop("can only handle the 'loge' link")
+ temp300 = eta[, 2*(1:NOS), drop = FALSE]
shape = eta2theta(temp300, .lshape, earg = .eshape )
devi = -2 * (log(y/mu) - y/mu + 1)
if (residuals) {
@@ -2545,7 +2589,7 @@ dfelix = function(x, a=0.25, log = FALSE) {
geometric = function(link = "logit", earg = list(), expected = TRUE,
- imethod = 1)
+ imethod = 1, iprob = NULL)
{
if (!is.logical(expected) || length(expected) != 1)
@@ -2555,7 +2599,7 @@ dfelix = function(x, a=0.25, log = FALSE) {
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -2582,9 +2626,16 @@ dfelix = function(x, a=0.25, log = FALSE) {
if ( .imethod == 1)
1 / (1 + median(rep(y, w)) + 1/16) else
1 / (1 + weighted.mean(y, w) + 1/16)
+
+
+ if (length( .iprob ))
+ prob.init = 0 * prob.init + .iprob
+
+
etastart = theta2eta(prob.init, .link, earg = .earg)
}
- }), list( .link = link, .earg = earg, .imethod = imethod ))),
+ }), list( .link = link, .earg = earg, .imethod = imethod,
+ .iprob = iprob ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
prob = eta2theta(eta, .link, earg = .earg)
(1 - prob) / prob
@@ -2594,7 +2645,9 @@ dfelix = function(x, a=0.25, log = FALSE) {
misc$earg = list(prob = .earg )
misc$expected = .expected
misc$imethod = .imethod
+ misc$iprob = .iprob
}), list( .link = link, .earg = earg,
+ .iprob = iprob,
.expected = expected, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -2630,14 +2683,17 @@ dfelix = function(x, a=0.25, log = FALSE) {
dbetageom = function(x, shape1, shape2, log = FALSE) {
if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+ stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(x)) stop("bad input for argument 'x'")
- if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
+ if (!is.Numeric(x))
+ stop("bad input for argument 'x'")
+ if (!is.Numeric(shape1, positive = TRUE))
+ stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, positive = TRUE))
+ stop("bad input for argument 'shape2'")
N = max(length(x), length(shape1), length(shape2))
- x = rep(x, len = N); shape1 = rep(shape1, len = N); shape2 = rep(shape2, len = N)
+ x = rep(x, length.out = N); shape1 = rep(shape1, length.out = N); shape2 = rep(shape2, length.out = N)
loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
xok = (x == round(x) & x >= 0)
loglik[!xok] = log(0)
@@ -2650,17 +2706,20 @@ dbetageom = function(x, shape1, shape2, log = FALSE) {
pbetageom = function(q, shape1, shape2, log.p = FALSE) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(shape1, positive = TRUE))
+ stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, positive = TRUE))
+ stop("bad input for argument 'shape2'")
N = max(length(q), length(shape1), length(shape2))
- q = rep(q, len = N); shape1 = rep(shape1, len = N); shape2 = rep(shape2, len = N)
+ q = rep(q, length.out = N); shape1 = rep(shape1, length.out = N); shape2 = rep(shape2, length.out = N)
ans = q * 0 # Retains names(q)
if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
max(abs(shape2-shape2[1])) < 1.0e-08) {
qstar = floor(q)
temp = if (max(qstar) >= 0) dbetageom(x=0:max(qstar),
- shape1=shape1[1], shape2=shape2[1]) else 0*qstar
+ shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
unq = unique(qstar)
for(i in unq) {
index = qstar == i
@@ -2670,16 +2729,13 @@ pbetageom = function(q, shape1, shape2, log.p = FALSE) {
for(ii in 1:N) {
qstar = floor(q[ii])
ans[ii] = if (qstar >= 0) sum(dbetageom(x=0:qstar,
- shape1=shape1[ii], shape2=shape2[ii])) else 0
+ shape1 = shape1[ii], shape2 = shape2[ii])) else 0
}
if (log.p) log(ans) else ans
}
rbetageom = function(n, shape1, shape2) {
- if (!is.Numeric(n, integ = TRUE,allow = 1)) stop("bad input for argument 'n'")
- if (!is.Numeric(shape1, pos = TRUE)) stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, pos = TRUE)) stop("bad input for argument 'shape2'")
- rgeom(n = n, prob = rbeta(n = n, shape1=shape1, shape2=shape2))
+ rgeom(n = n, prob = rbeta(n = n, shape1 = shape1, shape2 = shape2))
}
@@ -2712,42 +2768,54 @@ negbinomial.control <- function(save.weight = TRUE, ...)
+
+
+
+
+
+
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if (mode(lsize) != "character" && mode(lsize) != "name")
+ lsize = as.character(substitute(lsize))
+
lmuuu = lmu
emuuu = emu
imuuu = imu
- if (length(imuuu) && !is.Numeric(imuuu, posit = TRUE))
+ if (!is.list(emuuu)) emuuu = list()
+ if (!is.list(esize)) esize = list()
+
+ if (length(imuuu) && !is.Numeric(imuuu, positive = TRUE))
stop("bad input for argument 'imu'")
- if (length(isize) && !is.Numeric(isize, posit = TRUE))
+ if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
- if (!is.Numeric(cutoff, allow = 1) || cutoff < 0.8 || cutoff >= 1)
+ if (!is.Numeric(cutoff, allowable.length = 1) ||
+ cutoff < 0.8 ||
+ cutoff >= 1)
stop("range error in the argument 'cutoff'")
- if (!is.Numeric(Maxiter, integ = TRUE, allow = 1) || Maxiter < 100)
+ if (!is.Numeric(Maxiter, integer.valued = TRUE, allowable.length = 1) ||
+ Maxiter < 100)
stop("bad input for argument 'Maxiter'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
if (!is.null(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 10)
warning("argument 'nsimEIM' should be an integer ",
"greater than 10, say")
}
- if (mode(lmuuu) != "character" && mode(lmuuu) != "name")
- lmuuu = as.character(substitute(lmuuu))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (!is.list(emuuu)) emuuu = list()
- if (!is.list(esize)) esize = list()
-
if (!is.logical( parallel ) || length( parallel ) != 1)
stop("argument 'parallel' must be TRUE or FALSE")
@@ -2758,12 +2826,14 @@ negbinomial.control <- function(save.weight = TRUE, ...)
ans =
new("vglmff",
+
blurb = c("Negative-binomial distribution\n\n",
"Links: ",
namesof("mu", lmuuu, earg = emuuu), ", ",
namesof("size", lsize, earg = esize), "\n",
"Mean: mu\n",
- "Variance: mu * (1 + mu / size)"),
+ "Variance: mu * (1 + mu / size) for NB-2"),
+
constraints = eval(substitute(expression({
dotzero <- .zero
@@ -2771,7 +2841,7 @@ negbinomial.control <- function(save.weight = TRUE, ...)
eval(negzero.expression)
if ( .parallel && ncol(cbind(y)) > 1)
- stop("univariate responses needed if parallel = TRUE")
+ stop("univariate responses needed if 'parallel = TRUE'")
constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
}), list( .parallel = parallel, .zero = zero ))),
@@ -2787,7 +2857,7 @@ negbinomial.control <- function(save.weight = TRUE, ...)
( .lmuuu == "loge") && ( .lsize == "loge"),
envir = VGAM:::VGAMenv)
if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allow = 1) && .zero != -2)
+ is.Numeric( .zero, allowable.length = 1) && .zero != -2)
stop("argument zero = -2 is required")
if (any(y < 0))
@@ -2795,6 +2865,7 @@ negbinomial.control <- function(save.weight = TRUE, ...)
if (any(round(y) != y))
stop("integer-values only allowed for the 'negbinomial' family")
+
y = as.matrix(y)
M = Musual * ncol(y)
NOS = ncoly = ncol(y) # Number of species
@@ -2810,17 +2881,18 @@ negbinomial.control <- function(save.weight = TRUE, ...)
}
if (is.numeric( .mu.init ))
- MU.INIT <- matrix( .mu.init, nrow(y), ncol(y), byrow = TRUE)
+ MU.INIT <- matrix( .mu.init , nrow(y), ncol(y), byrow = TRUE)
+
if (!length(etastart)) {
mu.init = y
for(iii in 1:ncol(y)) {
use.this = if ( .imethod == 1) {
- weighted.mean(y[, iii], w) + 1/16
+ weighted.mean(y[, iii], w) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .quantile.probs) + 1/16)
+ c(quantile(y[, iii], probs = .quantile.probs ) + 1/16)
} else {
- median(y[, iii]) + 1/16
+ median(y[, iii]) + 1/16
}
if (is.numeric( .mu.init )) {
@@ -2828,7 +2900,7 @@ negbinomial.control <- function(save.weight = TRUE, ...)
} else {
medabsres = median(abs(y[, iii] - use.this)) + 1/32
allowfun = function(z, maxtol=1) sign(z)*pmin(abs(z), maxtol)
- mu.init[, iii] = use.this + (1 - .sinit) *
+ mu.init[, iii] = use.this + (1 - .sinit ) *
allowfun(y[, iii] - use.this, maxtol = medabsres)
mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
@@ -2844,16 +2916,22 @@ negbinomial.control <- function(save.weight = TRUE, ...)
}
k.grid = 2^((-7):7)
k.grid = 2^(seq(-8, 8, length = 40))
- kay.init = matrix(0, nr=n, nc=NOS)
+ kay.init = matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kay.init[,spp.] = getMaxMin(k.grid,
- objfun=negbinomial.Loglikfun,
- y = y[,spp.], x = x, w = w,
- extraargs= mu.init[, spp.])
+ kay.init[, spp.] = getMaxMin(k.grid,
+ objfun = negbinomial.Loglikfun,
+ y = y[, spp.], x = x, w = w,
+ extraargs = mu.init[, spp.])
}
}
- etastart = cbind(theta2eta(mu.init, link = .lmuuu, earg = .emuuu),
- theta2eta(kay.init, link = .lsize, earg = .esize))
+
+ newemu = if ( .lmuuu == "nbcanlink") {
+ c(list(size = kay.init), .emuuu)
+ } else {
+ .emuuu
+ }
+ etastart = cbind(theta2eta(mu.init , link = .lmuuu , earg = newemu ),
+ theta2eta(kay.init, link = .lsize , earg = .esize ))
etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .lmuuu = lmuuu, .lsize = lsize,
@@ -2864,90 +2942,150 @@ negbinomial.control <- function(save.weight = TRUE, ...)
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS = ncol(eta) / Musual
- eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
- .lmuuu, earg = .emuuu)
- }, list( .lmuuu = lmuuu, .emuuu = emuuu,
- .esize = esize ))),
+ Musual <- 2
+ NOS = ncol(eta) / Musual
+ eta.k = eta[, Musual*(1:NOS) , drop = FALSE]
+ kmat = eta2theta(eta.k, .lsize , earg = .esize )
+
+ newemu = if ( .lmuuu == "nbcanlink") {
+ c(list(size = kmat), .emuuu)
+ } else {
+ .emuuu
+ }
+
+ eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE], .lmuuu ,
+ earg = newemu)
+ }, list( .lmuuu = lmuuu, .lsize = lsize,
+ .emuuu = emuuu, .esize = esize ))),
last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
-
- temp0303 = c(rep( .lmuuu, length = NOS),
- rep( .lsize, length = NOS))
- names(temp0303) = c(if (NOS == 1) "mu" else
- paste("mu", 1:NOS, sep = ""),
- if (NOS == 1) "size" else
- paste("size", 1:NOS, sep = ""))
- temp0303 = temp0303[interleave.VGAM(M, M = 2)]
- misc$link = temp0303 # Already named
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:NOS) {
- misc$earg[[2*ii-1]] = .emuuu
- misc$earg[[2*ii ]] = .esize
- }
- misc$cutoff = .cutoff
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- misc$shrinkage.init = .sinit
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+
+ temp0303 = c(rep( .lmuuu, length = NOS),
+ rep( .lsize, length = NOS))
+ names(temp0303) = c(if (NOS == 1) "mu" else
+ paste("mu", 1:NOS, sep = ""),
+ if (NOS == 1) "size" else
+ paste("size", 1:NOS, sep = ""))
+ temp0303 = temp0303[interleave.VGAM(M, M = 2)]
+ misc$link = temp0303 # Already named
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] = newemu
+ misc$earg[[Musual*ii ]] = .esize
+ }
+
+ misc$cutoff = .cutoff
+ misc$imethod = .imethod
+ misc$nsimEIM = .nsimEIM
+ misc$expected = TRUE
+ misc$shrinkage.init = .sinit
}), list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
.cutoff = cutoff,
.nsimEIM = nsimEIM,
.sinit = shrinkage.init,
.imethod = imethod ))),
+
linkfun = eval(substitute(function(mu, extra = NULL) {
- temp = theta2eta(mu, .lmuuu, earg = .emuuu)
- kayy = theta2eta(if (is.numeric( .isize)) .isize else 1.0,
- .lsize, earg = .esize)
- kayy = 0 * temp + kayy # Right dimension now.
- temp = cbind(temp, kayy)
- temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
- }, list( .lmuuu = lmuuu, .emuuu = emuuu,
- .lsize = lsize, .esize = esize,
- .isize = isize ))),
+ Musual <- 2
+
+
+ eta.temp = theta2eta(mu, .lmuuu , earg = newemu)
+ eta.kayy = theta2eta(if (is.numeric( .isize )) .isize else 1.0,
+ .lsize , earg = .esize )
+ eta.kayy = 0 * eta.temp + eta.kayy # Right dimension now.
+
+
+ newemu = if ( .lmuuu == "nbcanlink") {
+ c(list(size = eta2theta(eta.kayy, .lsize, earg = .esize)), .emuuu)
+ } else {
+ .emuuu
+ }
+
+ eta.temp = cbind(eta.temp, eta.kayy)
+ eta.temp[, interleave.VGAM(ncol(eta.temp), M = Musual), drop = FALSE]
+ }, list( .lmuuu = lmuuu, .lsize = lsize,
+ .emuuu = emuuu, .esize = esize,
+ .isize = isize ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = ncol(eta) / 2
- temp300 = eta[, 2*(1:NOS), drop = FALSE]
+ Musual <- 2
+ NOS = ncol(eta) / Musual
+
+ eta.k = eta[, Musual*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
- bigval = 28
- temp300 = ifelse(temp300 > bigval, bigval, temp300)
- temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
+ bigval = 68
+ eta.k = ifelse(eta.k > bigval, bigval, eta.k)
+ eta.k = ifelse(eta.k < -bigval, -bigval, eta.k)
+ }
+ kmat = eta2theta(eta.k, .lsize , earg = .esize )
+
+ newemu = if ( .lmuuu == "nbcanlink") {
+ c(list(size = kmat), .emuuu)
+ } else {
+ .emuuu
}
- kmat = eta2theta(temp300, .lsize, earg = .esize)
+
+
+
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
- }, list( .lsize = lsize, .emu = emu, .esize = esize ))),
+ }, list( .lsize = lsize,
+ .lmuuu = lmuuu, .emuuu = emuuu, .esize = esize ))),
+
vfamily = c("negbinomial"),
+
deriv = eval(substitute(expression({
- NOS = ncol(eta) / 2
+ Musual <- 2
+ NOS = ncol(eta) / Musual
M = ncol(eta)
- temp3 = eta[, 2*(1:NOS), drop = FALSE]
- bigval = 28
- temp3 = ifelse(temp3 > bigval, bigval, temp3)
- temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
- kmat = eta2theta(temp3, .lsize, earg = .esize)
+ eta.k = eta[, Musual*(1:NOS) , drop = FALSE]
+ if ( .lsize == "loge") {
+ bigval = 68
+ eta.k = ifelse(eta.k > bigval, bigval, eta.k)
+ eta.k = ifelse(eta.k < -bigval, -bigval, eta.k)
+ }
+ kmat = eta2theta(eta.k, .lsize , earg = .esize )
- dl.dmu = y/mu - (y+kmat)/(kmat+mu)
- dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(mu+kmat) + 1 +
- log(kmat/(kmat+mu))
+ newemu = if ( .lmuuu == "nbcanlink") {
+ c(list(size = kmat), .emuuu)
+ } else {
+ .emuuu
+ }
+
+ dl.dmu = y / mu - (y + kmat) / (mu + kmat)
+ dl.dk = digamma(y + kmat) - digamma(kmat) -
+ (y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
+
+ dmu.deta = dtheta.deta(mu, .lmuuu ,
+ earg = c(list(wrt.eta = 1), newemu)) # eta1
+ dk.deta1 = dtheta.deta(mu, .lmuuu ,
+ earg = c(list(wrt.eta = 2), newemu))
+ dk.deta2 = dtheta.deta(kmat, .lsize , earg = .esize)
- dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
- dk.deta = dtheta.deta(kmat, .lsize, earg = .esize)
+ myderiv = c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dk * dk.deta2)
+
+
+ if ( .lmuuu == "nbcanlink") {
+ myderiv[, 1:NOS] =
+ myderiv[, 1:NOS] + c(w) * dl.dk * dk.deta1
+ }
+
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .lmuuu = lmuuu, .lsize = lsize,
+ .emuuu = emuuu, .esize = esize ))),
- dthetas.detas = cbind(dmu.deta, dk.deta)
- myderiv = c(w) * cbind(dl.dmu, dl.dk) * dthetas.detas
- myderiv[, interleave.VGAM(M, M = 2)]
- }), list( .lmu = lmu, .lsize = lsize, .emu = emu, .esize = esize ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
- if (is.null( .nsimEIM)) {
+ wz = matrix(as.numeric(NA), n, M)
+
+
+ if (is.null( .nsimEIM )) {
fred2 = dotFortran(name = "enbin9", ans = double(n*NOS),
as.double(kmat), as.double(mu), as.double( .cutoff ),
as.integer(n), ok = as.integer(1), as.integer(NOS),
@@ -2957,49 +3095,75 @@ negbinomial.control <- function(save.weight = TRUE, ...)
stop("error in Fortran subroutine exnbin9")
dim(fred2$ans) = c(n, NOS)
ed2l.dk2 = -fred2$ans - 1/kmat + 1/(kmat+mu)
- wz[,2*(1:NOS)] = dk.deta^2 * ed2l.dk2
+ wz[, Musual*(1:NOS)] = dk.deta2^2 * ed2l.dk2
+
+
+
} else {
+
run.varcov = matrix(0, n, NOS)
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+
for(ii in 1:( .nsimEIM )) {
ysim = rnbinom(n = n*NOS, mu = c(mu), size = c(kmat))
if (NOS > 1) dim(ysim) = c(n, NOS)
- dl.dk = digamma(ysim+kmat) - digamma(kmat) -
- (ysim+kmat)/(mu+kmat) + 1 + log(kmat/(kmat+mu))
- run.varcov = run.varcov + dl.dk^2
- }
- run.varcov = cbind(run.varcov / .nsimEIM)
- wz[, 2*(1:NOS)] = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ dl.dk = digamma(ysim + kmat) - digamma(kmat) -
+ (ysim + kmat) / (mu + kmat) +
+ 1 + log(kmat / (kmat + mu))
+ run.varcov = run.varcov + dl.dk^2
+ } # end of for loop
+
+ run.varcov = cbind(run.varcov / .nsimEIM )
+ ed2l.dk2 = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz[, Musual*(1:NOS)] = ed2l.dk2 * dk.deta2^2
+ } # end of else
+
+
+ ed2l.dmu2 = 1 / mu - 1 / (mu + kmat)
+ wz[, Musual*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
+
+
+
+ if ( .lmuuu == "nbcanlink") {
+ wz[, Musual*(1:NOS)-1] =
+ wz[, Musual*(1:NOS)-1] + ed2l.dk2 * dk.deta1^2
+
+ wz = cbind(wz,
+ kronecker(ed2l.dk2 * dk.deta1 * dk.deta2,
+ if (NOS > 1) cbind(1, 0) else 1))
+ }
+
- wz[, 2*(1:NOS)] = wz[, 2*(1:NOS)] * dk.deta^2
- }
- ed2l.dmu2 = 1 / mu - 1 / (mu+kmat)
- wz[, 2*(1:NOS)-1] = dmu.deta^2 * ed2l.dmu2
c(w) * wz
- }), list( .cutoff = cutoff, .Maxiter = Maxiter,
+ }), list( .cutoff = cutoff,
+ .Maxiter = Maxiter,
+ .lmuuu = lmuuu,
.nsimEIM = nsimEIM ))))
if (deviance.arg) ans at deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = ncol(eta) / 2
- temp300 = eta[, 2*(1:NOS), drop = FALSE]
+ Musual = 2
+ NOS = ncol(eta) / Musual
+ temp300 = eta[, Musual*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
- bigval = 28
+ bigval = 68
temp300[temp300 > bigval] = bigval
temp300[temp300 < -bigval] = -bigval
} else stop("can only handle the 'loge' link")
- k = eta2theta(temp300, .lsize, earg = .esize)
- devi = 2 * (y*log(ifelse(y < 1, 1, y)/mu) + (y+k)*log((mu+k)/(k+y)))
+ kmat = eta2theta(temp300, .lsize, earg = .esize )
+ devi = 2 * (y * log(ifelse(y < 1, 1, y)/mu) +
+ (y+kmat) * log((mu+kmat)/(kmat+y)))
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
} else {
sum(w * devi)
}
- }, list( .lsize = lsize, .emu = emu, .esize = esize )))
+ }, list( .lsize = lsize, .emuuu = emuuu,
+ .esize = esize )))
ans
}
@@ -3029,19 +3193,21 @@ polya.control <- function(save.weight = TRUE, ...)
- if (length(iprob) && !is.Numeric(iprob, posit = TRUE))
+ if (length(iprob) && !is.Numeric(iprob, positive = TRUE))
stop("bad input for argument 'iprob'")
- if (length(isize) && !is.Numeric(isize, posit = TRUE))
+ if (length(isize) && !is.Numeric(isize, positive = TRUE))
stop("bad input for argument 'isize'")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 10)
warning("argument 'nsimEIM' should be an integer ",
@@ -3051,6 +3217,7 @@ polya.control <- function(save.weight = TRUE, ...)
lprob = as.character(substitute(lprob))
if (mode(lsize) != "character" && mode(lsize) != "name")
lsize = as.character(substitute(lsize))
+
if (!is.list(eprob)) eprob = list()
if (!is.list(esize)) esize = list()
@@ -3148,9 +3315,9 @@ polya.control <- function(save.weight = TRUE, ...)
kayy.init = matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
kayy.init[,spp.] = getMaxMin(k.grid,
- objfun=negbinomial.Loglikfun,
+ objfun = negbinomial.Loglikfun,
y = y[,spp.], x = x, w = w,
- extraargs= mu.init[,spp.])
+ extraargs = mu.init[,spp.])
}
}
@@ -3215,7 +3382,7 @@ polya.control <- function(save.weight = TRUE, ...)
.lprob, earg = .eprob)
temp300 = eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
- bigval = 28
+ bigval = 68
temp300 = ifelse(temp300 > bigval, bigval, temp300)
temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
}
@@ -3235,7 +3402,7 @@ polya.control <- function(save.weight = TRUE, ...)
.lprob, earg = .eprob)
temp3 = eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
- bigval = 28
+ bigval = 68
temp3 = ifelse(temp3 > bigval, bigval, temp3)
temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
}
@@ -3306,7 +3473,7 @@ polya.control <- function(save.weight = TRUE, ...)
NOS = ncol(eta) / 2
temp300 = eta[, 2*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
- bigval = 28
+ bigval = 68
temp300[temp300 > bigval] = bigval
temp300[temp300 < -bigval] = -bigval
} else {
@@ -3387,22 +3554,24 @@ polya.control <- function(save.weight = TRUE, ...)
imethod = 1)
{
+ if (mode(ldf) != "character" && mode(ldf) != "name")
+ ldf <- as.character(substitute(ldf))
+
ldof <- ldf
edof <- edf
idof <- idf
- if (mode(ldof) != "character" && mode(ldof) != "name")
- ldof <- as.character(substitute(ldof))
if (!is.list(edof)) edof <- list()
if (length(idof))
if (!is.Numeric(idof) || any(idof <= 1))
stop("argument 'idf' should be > 1")
- if (!is.Numeric(tol1, posit = TRUE))
+ if (!is.Numeric(tol1, positive = TRUE))
stop("argument 'tol1' should be positive")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3411,7 +3580,7 @@ polya.control <- function(save.weight = TRUE, ...)
blurb = c("Student t-distribution\n\n",
"Link: ",
namesof("df", ldof, earg = edof), "\n",
- "Variance: df/(df-2) if df > 2\n"),
+ "Variance: df / (df - 2) if df > 2\n"),
infos = eval(substitute(function(...) {
list(Musual = 1,
tol1 = .tol1 )
@@ -3438,7 +3607,7 @@ polya.control <- function(save.weight = TRUE, ...)
etastart <- rep(theta2eta(init.df, .ldof , earg = .edof ),
- len = length(y))
+ length.out = length(y))
}
}), list( .ldof = ldof, .edof = edof, .idof = idof,
.tol1 = tol1, .imethod = imethod ))),
@@ -3528,23 +3697,26 @@ polya.control <- function(save.weight = TRUE, ...)
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation <- as.character(substitute(llocation))
+ if (!is.list(elocation)) elocation <- list()
+
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+ if (!is.list(escale)) escale <- list()
+
+ if (mode(ldf) != "character" && mode(ldf) != "name")
+ ldf <- as.character(substitute(ldf))
+ if (!is.list(edf)) edf <- list()
+
+
lloc <- llocation; lsca <- lscale; ldof <- ldf
eloc <- elocation; esca <- escale; edof <- edf
iloc <- ilocation; isca <- iscale; idof <- idf
- if (mode(lloc) != "character" && mode(lloc) != "name")
- lloc <- as.character(substitute(lloc))
- if (!is.list(eloc)) eloc <- list()
-
- if (mode(lsca) != "character" && mode(lsca) != "name")
- lsca <- as.character(substitute(lsca))
- if (!is.list(esca)) esca <- list()
-
- if (mode(ldof) != "character" && mode(ldof) != "name")
- ldof <- as.character(substitute(ldof))
- if (!is.list(edof)) edof <- list()
+
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3552,7 +3724,7 @@ polya.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(iloc))
stop("bad input in argument 'ilocation'")
if (length(isca))
- if (!is.Numeric(isca, posit = TRUE))
+ if (!is.Numeric(isca, positive = TRUE))
stop("argument 'iscale' should be positive")
if (length(idof))
if (!is.Numeric(idof) || any(idof <= 1))
@@ -3608,14 +3780,14 @@ polya.control <- function(save.weight = TRUE, ...)
init.sca <- if (length( .isca )) .isca else
sdvec / 2.3
- sdvec <- rep(sdvec, len = max(length(sdvec), length(init.sca)))
- init.sca <- rep(init.sca, len = max(length(sdvec), length(init.sca)))
+ sdvec <- rep(sdvec, length.out = max(length(sdvec), length(init.sca)))
+ init.sca <- rep(init.sca, length.out = max(length(sdvec), length(init.sca)))
ind9 <- (sdvec / init.sca <= (1 + 0.12))
sdvec[ind9] <- sqrt(1.12) * init.sca[ind9]
init.dof <- if (length( .idof )) .idof else
(2 * (sdvec / init.sca)^2) / ((sdvec / init.sca)^2 - 1)
if (!is.Numeric(init.dof) || init.dof <= 1)
- init.dof <- rep(3, len = ncoly)
+ init.dof <- rep(3, length.out = ncoly)
mat1 <- matrix(theta2eta(init.loc, .lloc, earg = .eloc), n, NOS,
byrow = TRUE)
@@ -3768,25 +3940,27 @@ polya.control <- function(save.weight = TRUE, ...)
{
+ if (mode(llocation) != "character" && mode(llocation) != "name")
+ llocation <- as.character(substitute(llocation))
+
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale <- as.character(substitute(lscale))
+
lloc <- llocation; lsca <- lscale
eloc <- elocation; esca <- escale
iloc <- ilocation; isca <- iscale
doff <- df
- if (mode(lloc) != "character" && mode(lloc) != "name")
- lloc <- as.character(substitute(lloc))
if (!is.list(eloc)) eloc <- list()
-
- if (mode(lsca) != "character" && mode(lsca) != "name")
- lsca <- as.character(substitute(lsca))
if (!is.list(esca)) esca <- list()
+
if (is.finite(doff))
- if (!is.Numeric(doff, posit = TRUE))
+ if (!is.Numeric(doff, positive = TRUE))
stop("argument 'df' must be positive")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
@@ -3794,7 +3968,7 @@ polya.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(iloc))
stop("bad input in argument 'ilocation'")
if (length(isca))
- if (!is.Numeric(isca, posit = TRUE))
+ if (!is.Numeric(isca, positive = TRUE))
stop("argument 'iscale' should be positive")
@@ -4036,12 +4210,12 @@ dsimplex = function(x, mu = 0.5, dispersion = 1, log = FALSE) {
rsimplex = function(n, mu = 0.5, dispersion = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
oneval <- (length(mu) == 1 && length(dispersion) == 1)
- answer = rep(0.0, len = use.n)
- mu = rep(mu, len = use.n); dispersion = rep(dispersion, len = use.n)
+ answer = rep(0.0, length.out = use.n)
+ mu = rep(mu, length.out = use.n); dispersion = rep(dispersion, length.out = use.n)
Kay1 = 3 * (dispersion * mu * (1-mu))^2
if (oneval) {
@@ -4053,7 +4227,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
myroots = myroots[myroots >= 0.0]
myroots = myroots[myroots <= 1.0]
pdfmax = dsimplex(myroots, mymu, dispersion[1])
- pdfmax = rep(max(pdfmax), len = use.n) # For multiple peaks
+ pdfmax = rep(max(pdfmax), length.out = use.n) # For multiple peaks
} else {
pdfmax = numeric(use.n)
for (ii in 1:use.n) {
@@ -4102,13 +4276,17 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
if (!is.list(emu)) emu = list()
if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
new("vglmff",
@@ -4159,7 +4337,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.imu = imu, .isigma = isigma,
.sinit = shrinkage.init, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .lmu, earg = .emu)
+ eta2theta(eta[, 1], .lmu, earg = .emu)
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
misc$link = c(mu = .lmu, sigma = .lsigma)
@@ -4174,7 +4352,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.sinit = shrinkage.init, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
+ sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE))
@@ -4185,7 +4363,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
deriv = eval(substitute(expression({
deeFun = function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
- sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
+ sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
@@ -4214,17 +4392,19 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
rig = function(lmu = "identity", llambda = "loge",
- emu = list(), elambda = list(), imu = NULL, ilambda=1)
+ emu = list(), elambda = list(), imu = NULL, ilambda=1)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (!is.Numeric(ilambda, posit = TRUE))
- stop("bad input for 'ilambda'")
- if (!is.list(emu)) emu = list()
- if (!is.list(elambda)) elambda = list()
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("bad input for 'ilambda'")
+
+ if (!is.list(emu)) emu = list()
+ if (!is.list(elambda)) elambda = list()
new("vglmff",
blurb = c("Reciprocal inverse Gaussian distribution \n",
@@ -4254,9 +4434,9 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
}
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
- .imu=imu, .ilambda=ilambda ))),
+ .imu=imu, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .lmu, earg = .emu)
+ eta2theta(eta[, 1], .lmu, earg = .emu)
}, list( .lmu = lmu,
.emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
@@ -4268,7 +4448,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
@@ -4282,7 +4462,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
c("mu", "lambda"), hessian= TRUE)
}
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
eval.d3 = eval(d3)
dl.dthetas = attr(eval.d3, "gradient")
@@ -4298,15 +4478,15 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
d2l.dthetas2 = attr(eval.d3, "hessian")
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = -d2l.dthetas2[,1,1] * dtheta.detas[,1]^2
- wz[,iam(2,2,M)] = -d2l.dthetas2[,2,2] * dtheta.detas[,2]^2
- wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
- dtheta.detas[,2]
+ wz[,iam(1,1,M)] = -d2l.dthetas2[, 1,1] * dtheta.detas[, 1]^2
+ wz[,iam(2,2,M)] = -d2l.dthetas2[, 2,2] * dtheta.detas[, 2]^2
+ wz[,iam(1,2,M)] = -d2l.dthetas2[, 1,2] * dtheta.detas[, 1] *
+ dtheta.detas[, 2]
if (!.expected) {
d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[, 2] * d2lambda
}
if (intercept.only) {
@@ -4326,7 +4506,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
hypersecant = function(link.theta = "elogit",
- earg = if (link.theta == "elogit") list(min=-pi/2, max=pi/2) else list(),
+ earg = if (link.theta == "elogit") list(min = -pi/2, max = pi/2) else list(),
init.theta = NULL)
{
@@ -4384,7 +4564,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
hypersecant.1 = function(link.theta = "elogit",
- earg=if (link.theta == "elogit") list(min=-pi/2, max=pi/2) else list(),
+ earg = if (link.theta == "elogit") list(min = -pi/2, max = pi/2) else
+ list(),
init.theta = NULL)
{
@@ -4457,8 +4638,10 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
lmu = as.character(substitute(lmu))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
+
if (is.Numeric(ilambda) && any(ilambda <= -1))
stop("ilambda must be > -1")
+
if (!is.list(emu)) emu = list()
if (!is.list(elambda)) elambda = list()
@@ -4489,10 +4672,10 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
theta2eta(lambda.init, .llambda, earg = .elambda))
}
- }), list( .lmu = lmu, .llambda = llambda, .imu=imu, .ilambda=ilambda,
+ }), list( .lmu = lmu, .llambda = llambda, .imu=imu, .ilambda = ilambda,
.emu = emu, .elambda = elambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .lmu, earg = .emu)
+ eta2theta(eta[, 1], .lmu, earg = .emu)
}, list( .lmu = lmu,
.emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
@@ -4504,7 +4687,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * (-0.5*log(y*(1-y)) - 0.5 * lambda *
@@ -4514,7 +4697,7 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.emu = emu, .elambda = elambda ))),
vfamily = c("leipnik"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
dl.dthetas =
c(w) * cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
@@ -4530,25 +4713,25 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
if (is.R()) {
denominator = y*(1-y) + (y-mu)^2
d2l.dthetas2 = array(NA, c(n,2,2))
- d2l.dthetas2[,1,1] = c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
- d2l.dthetas2[,1,2] =
- d2l.dthetas2[,2,1] = c(w) * (y-mu) / denominator
- d2l.dthetas2[,2,2] = c(w) * (-0.25*trigamma((lambda+1)/2) +
+ d2l.dthetas2[, 1,1] = c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
+ d2l.dthetas2[, 1,2] =
+ d2l.dthetas2[, 2,1] = c(w) * (y-mu) / denominator
+ d2l.dthetas2[, 2,2] = c(w) * (-0.25*trigamma((lambda+1)/2) +
0.25*trigamma(1+lambda/2))
} else {
d2l.dthetas2 = attr(eval.d3, "hessian")
}
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = -d2l.dthetas2[,1,1] * dtheta.detas[,1]^2
- wz[,iam(2,2,M)] = -d2l.dthetas2[,2,2] * dtheta.detas[,2]^2
- wz[,iam(1,2,M)] = -d2l.dthetas2[,1,2] * dtheta.detas[,1] *
- dtheta.detas[,2]
+ wz[,iam(1,1,M)] = -d2l.dthetas2[, 1,1] * dtheta.detas[, 1]^2
+ wz[,iam(2,2,M)] = -d2l.dthetas2[, 2,2] * dtheta.detas[, 2]^2
+ wz[,iam(1,2,M)] = -d2l.dthetas2[, 1,2] * dtheta.detas[, 1] *
+ dtheta.detas[, 2]
if (!.expected) {
d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[,1] * d2mudeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[,2] * d2lambda
+ wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[, 2] * d2lambda
}
if (intercept.only) {
@@ -4581,8 +4764,11 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
lrho = as.character(substitute(lrho))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(erho)) erho = list()
if (!is.list(elambda)) elambda = list()
@@ -4619,10 +4805,10 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
}
}), list( .llambda = llambda, .lrho=lrho,
.elambda = elambda, .erho=erho,
- .ilambda=ilambda, .irho=irho ))),
+ .ilambda = ilambda, .irho=irho ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- rho = eta2theta(eta[,1], .lrho, earg = .erho)
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
ifelse(rho > 0.5, lambda*(1-rho)/(2*rho-1), NA)
}, list( .llambda = llambda, .lrho=lrho,
.elambda = elambda, .erho=erho ))),
@@ -4634,8 +4820,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.elambda = elambda, .erho=erho ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rho = eta2theta(eta[,1], .lrho, earg = .erho)
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w*(log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
@@ -4645,8 +4831,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
.elambda = elambda, .erho=erho ))),
vfamily = c("invbinomial"),
deriv = eval(substitute(expression({
- rho = eta2theta(eta[,1], .lrho, earg = .erho)
- lambda = eta2theta(eta[,2], .llambda, earg = .elambda)
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
dl.drho = (y + lambda)/rho - y/(1-rho)
dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
log(rho)
@@ -4685,11 +4871,11 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
genpoisson = function(llambda = "elogit", ltheta = "loge",
- elambda=if (llambda == "elogit") list(min=-1,max = 1) else list(),
- etheta = list(),
- ilambda = NULL, itheta = NULL,
- use.approx = TRUE,
- imethod = 1, zero=1)
+ elambda = if (llambda == "elogit") list(min = -1, max = 1) else list(),
+ etheta = list(),
+ ilambda = NULL, itheta = NULL,
+ use.approx = TRUE,
+ imethod = 1, zero = 1)
{
@@ -4697,13 +4883,18 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
llambda = as.character(substitute(llambda))
if (mode(ltheta) != "character" && mode(ltheta) != "name")
ltheta = as.character(substitute(ltheta))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (!is.list(elambda)) elambda = list()
if (!is.list(etheta)) etheta = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
+
if (!is.logical(use.approx) || length(use.approx) != 1)
stop("'use.approx' must be logical value")
@@ -4741,53 +4932,54 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
if (!length(etastart)) {
lambda = rep(if (length( .ilambda)) .ilambda else
init.lambda, length = n)
- theta = rep(if (length( .itheta)) .itheta else init.theta, length = n)
+ theta = rep(if (length( .itheta)) .itheta else init.theta,
+ length = n)
etastart = cbind(theta2eta(lambda, .llambda, earg = .elambda),
theta2eta(theta, .ltheta, earg = .etheta))
}
- }), list( .ltheta=ltheta, .llambda = llambda,
- .etheta=etheta, .elambda = elambda,
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda,
.imethod = imethod,
- .itheta=itheta, .ilambda=ilambda )) ),
+ .itheta = itheta, .ilambda = ilambda )) ),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
- theta/(1-lambda)
- }, list( .ltheta=ltheta, .llambda = llambda,
- .etheta=etheta, .elambda = elambda ))),
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ theta = eta2theta(eta[, 2], .ltheta, earg = .etheta)
+ theta / (1 - lambda)
+ }, list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
last = eval(substitute(expression({
- misc$link = c(lambda=.llambda, theta=.ltheta)
- misc$earg = list(lambda=.elambda, theta=.etheta)
+ misc$link = c(lambda = .llambda , theta = .ltheta )
+ misc$earg = list(lambda = .elambda , theta = .etheta )
if (! .use.approx )
misc$pooled.weight = pooled.weight
- }), list( .ltheta=ltheta, .llambda = llambda,
+ }), list( .ltheta = ltheta, .llambda = llambda,
.use.approx = use.approx,
- .etheta=etheta, .elambda = elambda ))),
+ .etheta = etheta, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
index = (y == 0)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w[index]*(-theta[index])) +
+ sum(w[index] * (-theta[index])) +
sum(w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
(y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
log(theta[!index]) - lgamma(y[!index]+1)) )
- }, list( .ltheta=ltheta, .llambda = llambda,
- .etheta=etheta, .elambda = elambda ))),
+ }, list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
vfamily = c("genpoisson"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta[,1], .llambda, earg = .elambda)
- theta = eta2theta(eta[,2], .ltheta, earg = .etheta)
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ theta = eta2theta(eta[, 2], .ltheta, earg = .etheta)
dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
dTHETA.deta = dtheta.deta(theta, .ltheta, earg = .etheta)
dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dtheta * dTHETA.deta )
- }), list( .ltheta=ltheta, .llambda = llambda,
- .etheta=etheta, .elambda = elambda ))),
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
if ( .use.approx ) {
@@ -4823,9 +5015,9 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
pooled.weight = FALSE
}
wz
- }), list( .ltheta=ltheta, .llambda = llambda,
+ }), list( .ltheta = ltheta, .llambda = llambda,
.use.approx = use.approx,
- .etheta=etheta, .elambda = elambda ))))
+ .etheta = etheta, .elambda = elambda ))))
}
@@ -4838,9 +5030,9 @@ dlgamma = function(x, location = 0, scale = 1, k = 1, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit = TRUE))
+ if (!is.Numeric(k, positive = TRUE))
stop("bad input for argument 'k'")
z = (x-location) / scale
if (log.arg) {
@@ -4849,31 +5041,31 @@ dlgamma = function(x, location = 0, scale = 1, k = 1, log = FALSE) {
exp(k * z - exp(z)) / (scale * gamma(k))
}
}
-plgamma = function(q, location = 0, scale = 1, k=1) {
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- z = (q-location)/scale
- pgamma(exp(z), k)
+
+
+plgamma = function(q, location = 0, scale = 1, k = 1) {
+
+ zedd = (q - location) / scale
+ ans = pgamma(exp(zedd), k)
+ ans[scale < 0] = NaN
+ ans
}
-qlgamma = function(p, location = 0, scale = 1, k=1) {
- if (!is.Numeric(scale, posit = TRUE))
+
+
+qlgamma = function(p, location = 0, scale = 1, k = 1) {
+ if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- q = qgamma(p, k)
- location + scale * log(q)
+
+ ans = location + scale * log(qgamma(p, k))
+ ans[scale < 0] = NaN
+ ans
}
-rlgamma = function(n, location = 0, scale = 1, k=1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- y = rgamma(n, k)
- location + scale * log(y)
+
+
+rlgamma = function(n, location = 0, scale = 1, k = 1) {
+ ans = location + scale * log(rgamma(n, k))
+ ans[scale < 0] = NaN
+ ans
}
@@ -4894,7 +5086,7 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
stop("response must be a vector or a one-column matrix")
predictors.names = namesof("k", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
- k.init = if (length( .init.k)) rep( .init.k, len = length(y)) else {
+ k.init = if (length( .init.k)) rep( .init.k, length.out = length(y)) else {
medy = median(y)
if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
}
@@ -4938,20 +5130,25 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
- lgamma3ff = function(llocation = "identity", lscale = "loge", lshape = "loge",
- elocation = list(), escale = list(), eshape = list(),
- ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
+ lgamma3ff = function(
+ llocation = "identity", lscale = "loge", lshape = "loge",
+ elocation = list(), escale = list(), eshape = list(),
+ ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
+ llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
- stop("bad input for argument 'iscale'")
+ lshape = as.character(substitute(lshape))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
@@ -4976,14 +5173,14 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
namesof("scale", .lscale, earg = .escale, tag = FALSE),
namesof("shape", .lshape, earg = .eshape, tag = FALSE))
if (!length(etastart)) {
- k.init = if (length( .ishape)) rep( .ishape, len = length(y)) else {
- rep(exp(median(y)), len = length(y))
+ k.init = if (length( .ishape)) rep( .ishape, length.out = length(y)) else {
+ rep(exp(median(y)), length.out = length(y))
}
- scale.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
- rep(sqrt(var(y) / trigamma(k.init)), len = length(y))
+ scale.init = if (length( .iscale)) rep( .iscale, length.out = length(y)) else {
+ rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
}
- loc.init = if (length( .iloc)) rep( .iloc, len = length(y)) else {
- rep(median(y) - scale.init * digamma(k.init), len = length(y))
+ loc.init = if (length( .iloc)) rep( .iloc, length.out = length(y)) else {
+ rep(median(y) - scale.init * digamma(k.init), length.out = length(y))
}
etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
theta2eta(scale.init, .lscale, earg = .escale),
@@ -4993,9 +5190,9 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape,
.iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocation, earg = .elocation) +
- eta2theta(eta[,2], .lscale, earg = .escale) *
- digamma(eta2theta(eta[,3], .lshape, earg = .eshape))
+ eta2theta(eta[, 1], .llocation, earg = .elocation) +
+ eta2theta(eta[, 2], .lscale, earg = .escale) *
+ digamma(eta2theta(eta[, 3], .lshape, earg = .eshape))
}, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
.elocation = elocation, .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
@@ -5005,9 +5202,9 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .llocation, earg = .elocation)
- bb = eta2theta(eta[,2], .lscale, earg = .escale)
- kk = eta2theta(eta[,3], .lshape, earg = .eshape)
+ aa = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ bb = eta2theta(eta[, 2], .lscale, earg = .escale)
+ kk = eta2theta(eta[, 3], .lshape, earg = .eshape)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dlgamma(x = y, location=aa, scale=bb, k=kk, log = TRUE))
@@ -5016,9 +5213,9 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape ))),
vfamily = c("lgamma3ff"),
deriv = eval(substitute(expression({
- a = eta2theta(eta[,1], .llocation, earg = .elocation)
- b = eta2theta(eta[,2], .lscale, earg = .escale)
- k = eta2theta(eta[,3], .lshape, earg = .eshape)
+ a = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ b = eta2theta(eta[, 2], .lscale, earg = .escale)
+ k = eta2theta(eta[, 3], .lshape, earg = .eshape)
zedd = (y-a)/b
dl.da = (exp(zedd) - k) / b
dl.db = (zedd * (exp(zedd) - k) - 1) / b
@@ -5051,9 +5248,12 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape ))))
}
- prentice74 = function(llocation = "identity", lscale = "loge", lshape = "identity",
- elocation = list(), escale = list(), eshape = list(),
- ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
+
+
+ prentice74 = function(
+ llocation = "identity", lscale = "loge", lshape = "identity",
+ elocation = list(), escale = list(), eshape = list(),
+ ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
{
if (mode(llocation) != "character" && mode(llocation) != "name")
llocation = as.character(substitute(llocation))
@@ -5061,10 +5261,14 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
- stop("bad input for argument 'iscale'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
@@ -5090,15 +5294,15 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
namesof("shape", .lshape, earg = .eshape, tag = FALSE))
if (!length(etastart)) {
sdy = sqrt(var(y))
- k.init = if (length( .ishape)) rep( .ishape, len = length(y)) else {
+ k.init = if (length( .ishape)) rep( .ishape, length.out = length(y)) else {
skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
- rep(-skewness, len = length(y))
+ rep(-skewness, length.out = length(y))
}
- scale.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
- rep(sdy, len = length(y))
+ scale.init = if (length( .iscale)) rep( .iscale, length.out = length(y)) else {
+ rep(sdy, length.out = length(y))
}
- loc.init = if (length( .iloc)) rep( .iloc, len = length(y)) else {
- rep(median(y), len = length(y))
+ loc.init = if (length( .iloc)) rep( .iloc, length.out = length(y)) else {
+ rep(median(y), length.out = length(y))
}
etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
theta2eta(scale.init, .lscale, earg = .escale),
@@ -5108,7 +5312,7 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape,
.iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocation, earg = .elocation)
+ eta2theta(eta[, 1], .llocation, earg = .elocation)
}, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
.elocation = elocation, .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
@@ -5118,9 +5322,9 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- a = eta2theta(eta[,1], .llocation, earg = .elocation)
- b = eta2theta(eta[,2], .lscale, earg = .escale)
- k = eta2theta(eta[,3], .lshape, earg = .eshape)
+ a = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ b = eta2theta(eta[, 2], .lscale, earg = .escale)
+ k = eta2theta(eta[, 3], .lshape, earg = .eshape)
tmp55 = k^(-2)
doubw = (y-a)*k/b + digamma(tmp55)
if (residuals) stop("loglikelihood residuals not ",
@@ -5130,9 +5334,9 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
.elocation = elocation, .escale = escale, .eshape = eshape ))),
vfamily = c("prentice74"),
deriv = eval(substitute(expression({
- a = eta2theta(eta[,1], .llocation, earg = .elocation)
- b = eta2theta(eta[,2], .lscale, earg = .escale)
- k = eta2theta(eta[,3], .lshape, earg = .eshape)
+ a = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ b = eta2theta(eta[, 2], .lscale, earg = .escale)
+ k = eta2theta(eta[, 3], .lshape, earg = .eshape)
tmp55 = k^(-2)
mustar = digamma(tmp55)
doubw = (y-a)*k/b + mustar
@@ -5173,74 +5377,62 @@ rlgamma = function(n, location = 0, scale = 1, k=1) {
dgengamma = function(x, scale = 1, d = 1, k = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit = TRUE))
- stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- N = max(length(x), length(scale), length(d), length(k))
- x = rep(x, len = N); scale = rep(scale, len = N);
- d = rep(d, len = N); k = rep(k, len = N);
-
- Loglik = rep(log(0), len = N)
- xok = x > 0
- if (any(xok)) {
- zedd = (x[xok]/scale[xok])^d[xok]
- Loglik[xok] = log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
- (d[xok]*k[xok]-1) * log(x[xok]) - zedd - lgamma(k[xok])
- }
- if (log.arg) {
- Loglik
- } else {
- exp(Loglik)
- }
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+ if (!is.Numeric(d, positive = TRUE))
+ stop("bad input for argument 'd'")
+ if (!is.Numeric(k, positive = TRUE))
+ stop("bad input for argument 'k'")
+
+ N = max(length(x), length(scale), length(d), length(k))
+ x = rep(x, length.out = N); scale = rep(scale, length.out = N);
+ d = rep(d, length.out = N); k = rep(k, length.out = N);
+
+ Loglik = rep(log(0), length.out = N)
+ xok = x > 0
+ if (any(xok)) {
+ zedd = (x[xok]/scale[xok])^d[xok]
+ Loglik[xok] = log(d[xok]) + (-d[xok]*k[xok]) * log(scale[xok]) +
+ (d[xok]*k[xok]-1) * log(x[xok]) - zedd - lgamma(k[xok])
+ }
+ if (log.arg) {
+ Loglik
+ } else {
+ exp(Loglik)
+ }
}
-pgengamma = function(q, scale = 1, d = 1, k=1) {
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit = TRUE))
- stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- z = (q/scale)^d
- pgamma(z, k)
+pgengamma = function(q, scale = 1, d = 1, k = 1) {
+ zedd = (q / scale)^d
+ ans = pgamma(zedd, k)
+ ans[scale < 0] = NaN
+ ans[d <= 0] = NaN
+ ans
}
-qgengamma = function(p, scale = 1, d = 1, k=1) {
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(d, posit = TRUE))
- stop("bad input for argument 'd'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for argument 'k'")
- q = qgamma(p, k)
- scale * q^(1/d)
+qgengamma = function(p, scale = 1, d = 1, k = 1) {
+ ans = scale * qgamma(p, k)^(1/d)
+ ans[scale < 0] = NaN
+ ans[d <= 0] = NaN
+ ans
}
-rgengamma = function(n, scale = 1, d = 1, k=1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for 'n'")
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for 'scale'")
- if (!is.Numeric(d, posit = TRUE))
- stop("bad input for 'd'")
- if (!is.Numeric(k, posit = TRUE))
- stop("bad input for 'k'")
- y = rgamma(n, k)
- scale * y^(1/d)
-}
+rgengamma = function(n, scale = 1, d = 1, k = 1) {
+ ans = scale * rgamma(n, k)^(1/d)
+ ans[scale < 0] = NaN
+ ans[d <= 0] = NaN
+ ans
+}
gengamma = function(lscale = "loge", ld = "loge", lk = "loge",
@@ -5253,10 +5445,14 @@ rgengamma = function(n, scale = 1, d = 1, k=1) {
ld = as.character(substitute(ld))
if (mode(lk) != "character" && mode(lk) != "name")
lk = as.character(substitute(lk))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
+
if (!is.list(escale)) escale = list()
if (!is.list(ed)) ed = list()
if (!is.list(ek)) ek = list()
@@ -5282,14 +5478,14 @@ rgengamma = function(n, scale = 1, d = 1, k=1) {
namesof("d", .ld, earg = .ed, tag = FALSE),
namesof("k", .lk, earg = .ek, tag = FALSE))
if (!length(etastart)) {
- b.init = if (length( .iscale)) rep( .iscale, len = length(y)) else {
- rep(mean(y^2) / mean(y), len = length(y))
+ b.init = if (length( .iscale)) rep( .iscale, length.out = length(y)) else {
+ rep(mean(y^2) / mean(y), length.out = length(y))
}
- k.init = if (length( .ik)) rep( .ik, len = length(y)) else {
- rep(mean(y) / b.init, len = length(y))
+ k.init = if (length( .ik)) rep( .ik, length.out = length(y)) else {
+ rep(mean(y) / b.init, length.out = length(y))
}
- d.init = if (length( .id)) rep( .id, len = length(y)) else {
- rep(digamma(k.init) / mean(log(y/b.init)), len = length(y))
+ d.init = if (length( .id)) rep( .id, length.out = length(y)) else {
+ rep(digamma(k.init) / mean(log(y/b.init)), length.out = length(y))
}
etastart = cbind(theta2eta(b.init, .lscale, earg = .escale),
theta2eta(d.init, .ld, earg = .ed),
@@ -5299,8 +5495,8 @@ rgengamma = function(n, scale = 1, d = 1, k=1) {
.escale = escale, .ed = ed, .ek = ek,
.iscale = iscale, .id=id, .ik=ik ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- b = eta2theta(eta[,1], .lscale, earg = .escale)
- k = eta2theta(eta[,3], .lk, earg = .ek)
+ b = eta2theta(eta[, 1], .lscale, earg = .escale)
+ k = eta2theta(eta[, 3], .lk, earg = .ek)
b * k
}, list( .ld = ld, .lscale = lscale, .lk = lk,
.escale = escale, .ed = ed, .ek = ek ))),
@@ -5311,9 +5507,9 @@ rgengamma = function(n, scale = 1, d = 1, k=1) {
.escale = escale, .ed = ed, .ek = ek ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- b = eta2theta(eta[,1], .lscale, earg = .escale)
- d = eta2theta(eta[,2], .ld, earg = .ed)
- k = eta2theta(eta[,3], .lk, earg = .ek)
+ b = eta2theta(eta[, 1], .lscale, earg = .escale)
+ d = eta2theta(eta[, 2], .ld, earg = .ed)
+ k = eta2theta(eta[, 3], .lk, earg = .ek)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dgengamma(x = y, scale=b, d=d, k=k, log = TRUE))
@@ -5322,9 +5518,9 @@ rgengamma = function(n, scale = 1, d = 1, k=1) {
.escale = escale, .ed = ed, .ek = ek ))),
vfamily = c("gengamma"),
deriv = eval(substitute(expression({
- b = eta2theta(eta[,1], .lscale, earg = .escale)
- d = eta2theta(eta[,2], .ld, earg = .ed)
- k = eta2theta(eta[,3], .lk, earg = .ek)
+ b = eta2theta(eta[, 1], .lscale, earg = .escale)
+ d = eta2theta(eta[, 2], .ld, earg = .ed)
+ k = eta2theta(eta[, 3], .lk, earg = .ek)
tmp22 = (y/b)^d
tmp33 = log(y/b)
dl.db = d * (tmp22 - k) / b
@@ -5364,16 +5560,16 @@ dlog = function(x, prob, log = FALSE) {
stop("bad input for argument 'log'")
rm(log)
- if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(x), length(prob))
if (length(x) != N)
- x = rep(x, len = N)
+ x = rep(x, length.out = N)
if (length(prob) != N)
- prob = rep(prob, len = N)
+ prob = rep(prob, length.out = N)
ox = !is.finite(x)
zero = ox | round(x) != x | x < 1
- ans = rep(0.0, len = length(x))
+ ans = rep(0.0, length.out = length(x))
if (log.arg) {
ans[ zero] = log(0.0)
ans[!zero] = x[!zero] * log(prob[!zero]) - log(x[!zero]) -
@@ -5391,10 +5587,10 @@ dlog = function(x, prob, log = FALSE) {
plog = function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
- q = rep(q, len = N); prob = rep(prob, len = N);
+ q = rep(q, length.out = N); prob = rep(prob, length.out = N);
bigno = 10
owen1965 = (q * (1 - prob) > bigno)
@@ -5434,10 +5630,10 @@ plog = function(q, prob, log.p = FALSE) {
if (FALSE)
plog = function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, posit = TRUE) || max(prob) >= 1)
+ if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
N = max(length(q), length(prob))
- q = rep(q, len = N); prob = rep(prob, len = N);
+ q = rep(q, length.out = N); prob = rep(prob, length.out = N);
ans = q * 0 # Retains names(q)
if (max(abs(prob-prob[1])) < 1.0e-08) {
qstar = floor(q)
@@ -5464,36 +5660,42 @@ plog = function(q, prob, log.p = FALSE) {
rlog = function(n, prob, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE))
- stop("bad input for argument 'n'")
- if (!is.Numeric(prob, allow = 1, posit = TRUE) || max(prob) >= 1)
- stop("bad input for argument 'prob'")
- if (!is.Numeric(Smallno, posit = TRUE, allow = 1) || Smallno > 0.01 ||
- Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len = n)
-
- ptr1 = 1; ptr2 = 0
- a = -1 / log1p(-prob)
- mean = a*prob/(1-prob) # E(Y)
- sigma = sqrt(a*prob*(1-a*prob)) / (1-prob) # sd(Y)
- ymax = dlog(x = 1, prob)
- while(ptr2 < n) {
- Lower = 0.5 # A continuity correction is used = 1 - 0.5.
- Upper = mean + 5 * sigma
- while(plog(q=Upper, prob) < 1-Smallno)
- Upper = Upper + sigma
- Upper = Upper + 0.5
- x = round(runif(2*n, min=Lower, max=Upper))
- index = runif(2*n, max=ymax) < dlog(x,prob)
- sindex = sum(index)
- if (sindex) {
- ptr2 = min(n, ptr1 + sindex - 1)
- ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
- }
+
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (!is.Numeric(prob, allowable.length = 1, positive = TRUE) ||
+ max(prob) >= 1)
+ stop("bad input for argument 'prob'")
+ if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+ Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument 'Smallno'")
+ ans = rep(0.0, length.out = use.n)
+
+ ptr1 = 1; ptr2 = 0
+ a = -1 / log1p(-prob)
+ mean = a*prob/(1-prob) # E(Y)
+ sigma = sqrt(a*prob*(1-a*prob)) / (1-prob) # sd(Y)
+ ymax = dlog(x = 1, prob)
+ while(ptr2 < use.n) {
+ Lower = 0.5 # A continuity correction is used = 1 - 0.5.
+ Upper = mean + 5 * sigma
+ while(plog(q = Upper, prob) < 1 - Smallno)
+ Upper = Upper + sigma
+ Upper = Upper + 0.5
+ x = round(runif(2 * use.n, min = Lower, max = Upper))
+ index = runif(2 * use.n, max = ymax) < dlog(x,prob)
+ sindex = sum(index)
+ if (sindex) {
+ ptr2 = min(use.n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
}
- ans
+ }
+ ans
}
@@ -5506,7 +5708,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
logff = function(link = "logit", earg = list(), init.c = NULL)
{
if (length(init.c) &&
- (!is.Numeric(init.c, posit = TRUE) || max(init.c) >= 1))
+ (!is.Numeric(init.c, positive = TRUE) || max(init.c) >= 1))
stop("init.c must be in (0,1)")
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -5527,7 +5729,8 @@ rlog = function(n, prob, Smallno=1.0e-6) {
sum(w * (log(a) + y * log(cc) - log(y)))
}
c.init = if (length( .init.c )) .init.c else
- getInitVals(gvals=seq(0.05, 0.95, len=9), llfun=llfun, y = y, w = w)
+ getInitVals(gvals = seq(0.05, 0.95, length.out = 9),
+ llfun = llfun, y = y, w = w)
c.init = rep(c.init, length=length(y))
etastart = theta2eta(c.init, .link, earg = .earg)
}
@@ -5576,7 +5779,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
- delta.known = is.Numeric(delta, allow = 1)
+ delta.known = is.Numeric(delta, allowable.length = 1)
if (mode(link.gamma) != "character" && mode(link.gamma) != "name")
link.gamma = as.character(substitute(link.gamma))
if (!is.list(earg)) earg = list()
@@ -5627,8 +5830,8 @@ rlog = function(n, prob, Smallno=1.0e-6) {
.igamma=igamma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
- delta = if ( .delta.known) .delta else eta[,2]
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg)
+ delta = if ( .delta.known) .delta else eta[, 2]
NA * mygamma
@@ -5648,8 +5851,8 @@ rlog = function(n, prob, Smallno=1.0e-6) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
- delta = if ( .delta.known) .delta else eta[,2]
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg)
+ delta = if ( .delta.known) .delta else eta[, 2]
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(w * 0.5 * (log(mygamma) -3*log(y-delta) - mygamma / (y-delta )))
@@ -5659,8 +5862,8 @@ rlog = function(n, prob, Smallno=1.0e-6) {
vfamily = c("levy"),
deriv = eval(substitute(expression({
eta = as.matrix(eta)
- mygamma = eta2theta(eta[,1], .link.gamma, earg = .earg)
- delta = if ( .delta.known) .delta else eta[,2]
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg)
+ delta = if ( .delta.known) .delta else eta[, 2]
if (! .delta.known)
dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
@@ -5687,156 +5890,41 @@ rlog = function(n, prob, Smallno=1.0e-6) {
- if (FALSE)
- stoppa = function(y0,
- link.alpha = "loge",
- link.theta = "loge", ealpha = list(), etheta = list(),
- ialpha = NULL,
- itheta=1.0,
- zero = NULL)
-{
- if (!is.Numeric(y0, allo=1) || y0 <= 0)
- stop("y0 must be a positive value")
-
- if (mode(link.alpha) != "character" && mode(link.alpha) != "name")
- link.alpha = as.character(substitute(link.alpha))
- if (mode(link.theta) != "character" && mode(link.theta) != "name")
- link.theta = as.character(substitute(link.theta))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(ealpha)) ealpha = list()
- if (!is.list(etheta)) etheta = list()
- new("vglmff",
- blurb = c("Stoppa distribution\n\n",
- "Links: ",
- namesof("alpha", link.alpha, earg = ealpha), ", ",
- namesof("theta", link.theta, earg = etheta), "\n",
- "Mean: theta*y0*beta(1-1/alpha, theta)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("alpha", .link.alpha, earg = .ealpha, tag = FALSE),
- namesof("theta", .link.theta, earg = .etheta, tag = FALSE))
- y0 = .y0
- if (min(y) < y0) stop("y0 must lie in the interval (0, min(y))")
- if (!length( .ialpha) || !length( .itheta)) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.theta = if (length( .itheta)) .itheta else 1
- xvec = log1p(-qvec^(1/init.theta))
- fit0 = lsfit(x = xvec, y=log(quantile(y, qvec))-log(y0), intercept = FALSE)
- }
+dlino = function(x, shape1, shape2, lambda = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- extra$y0 = y0
- if (!length(etastart)) {
- alpha = rep(if (length( .ialpha)) .ialpha else
- -1/fit0$coef[1], length = n)
- theta = rep(if (length( .itheta)) .itheta else 1.0, length = n)
- etastart = cbind(theta2eta(alpha, .link.alpha, earg = .ealpha),
- theta2eta(theta, .link.theta, earg = .etheta))
- }
- }), list( .link.theta = link.theta, .link.alpha = link.alpha,
- .y0=y0,
- .itheta=itheta, .ialpha=ialpha ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
- theta * extra$y0 * beta(1-1/alpha, theta)
- }, list( .link.theta = link.theta, .link.alpha = link.alpha ))),
- last = eval(substitute(expression({
- misc$link = c(alpha = .link.alpha, theta = .link.theta)
- }), list( .link.theta = link.theta, .link.alpha = link.alpha ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w*(log(theta*alpha) + alpha*log(extra$y0) -(alpha+1)*log(y)+
- (theta-1) * log1p(-(y/extra$y0)^(-alpha))))
- }, list( .link.theta = link.theta, .link.alpha = link.alpha ))),
- vfamily = c("stoppa"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta[,1], .link.alpha, earg = .ealpha)
- theta = eta2theta(eta[,2], .link.theta, earg = .etheta)
- temp8 = (y / extra$y0)^(-alpha)
- temp8a = log(temp8)
- temp8b = log1p(-temp8)
- dl.dalpha = 1/alpha - log(y/extra$y0) + (theta-1) * temp8 *
- log(y / extra$y0) / (1-temp8)
- dl.dtheta = 1/theta + temp8b
- dalpha.deta = dtheta.deta(alpha, .link.alpha, earg = .ealpha)
- dTHETA.deta = dtheta.deta(theta, .link.theta, earg = .etheta)
- c(w) * cbind( dl.dalpha * dalpha.deta,
- dl.dtheta * dTHETA.deta )
- }), list( .link.theta = link.theta, .link.alpha = link.alpha ))),
- weight = eval(substitute(expression({
- ed2l.dalpha = 1/alpha^2 + theta * (2 * log(extra$y0) * (digamma(2)-
- digamma(theta+4)) - (trigamma(1) +
- trigamma(theta+3)) / alpha^3) / (alpha *
- (theta+1) * (theta+2) / n)
- ed2l.dtheta = 1 / theta^2
- ed2l.dalphatheta = (digamma(2)-digamma(theta+2)) / (alpha*(theta+1))
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.dalpha * dalpha.deta^2
- wz[,iam(2,2,M)] = ed2l.dtheta * dTHETA.deta^2
- wz[,iam(1,2,M)] = ed2l.dalpha * dTHETA.deta * dalpha.deta
- wz = c(w) * wz
- wz
- }), list( .link.theta = link.theta, .link.alpha = link.alpha ))) )
+ loglik = dbeta(x = x, shape1 = shape1, shape2 = shape2, log = TRUE) +
+ shape1 * log(lambda) -
+ (shape1+shape2) * log1p(-(1-lambda)*x)
+ if (log.arg) loglik else exp(loglik)
}
-
-
-dlino = function(x, shape1, shape2, lambda = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- loglik = dbeta(x = x, shape1=shape1, shape2=shape2, log = TRUE) +
- shape1 * log(lambda) -
- (shape1+shape2) * log1p(-(1-lambda)*x)
- if (log.arg) loglik else exp(loglik)
+plino = function(q, shape1, shape2, lambda = 1) {
+ ans = pbeta(q = lambda * q / (1 - (1-lambda)*q),
+ shape1 = shape1, shape2 = shape2)
+ ans[lambda <= 0] = NaN
+ ans
}
-plino = function(q, shape1, shape2, lambda=1) {
- if (!is.Numeric(q)) stop("bad input for 'q'")
- if (!is.Numeric(shape1, posit = TRUE))
- stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, posit = TRUE))
- stop("bad input for argument 'shape2'")
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- pbeta(q=lambda*q/(1 - (1-lambda)*q), shape1=shape1, shape2=shape2)
-}
-qlino = function(p, shape1, shape2, lambda=1) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- if (!is.Numeric(shape1, posit = TRUE))
- stop("bad input for argument 'shape1'")
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- Y = qbeta(p=p, shape1=shape1, shape2=shape2)
- Y / (lambda + (1-lambda)*Y)
+qlino = function(p, shape1, shape2, lambda = 1) {
+ Y = qbeta(p = p, shape1 = shape1, shape2 = shape2)
+ ans = Y / (lambda + (1-lambda)*Y)
+ ans[lambda <= 0] = NaN
+ ans
}
-rlino = function(n, shape1, shape2, lambda=1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(shape1, posit = TRUE))
- stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, posit = TRUE))
- stop("bad input for argument 'shape2'")
- if (!is.Numeric(lambda, posit = TRUE))
- stop("bad input for argument 'lambda'")
- Y = rbeta(n = n, shape1=shape1, shape2=shape2)
- Y / (lambda + (1-lambda)*Y)
+rlino = function(n, shape1, shape2, lambda = 1) {
+ Y = rbeta(n = n, shape1 = shape1, shape2 = shape2)
+ ans = Y / (lambda + (1 - lambda) * Y)
+ ans[lambda <= 0] = NaN
+ ans
}
@@ -5853,10 +5941,13 @@ rlino = function(n, shape1, shape2, lambda=1) {
lshape2 = as.character(substitute(lshape2))
if (mode(llambda) != "character" && mode(llambda) != "name")
llambda = as.character(substitute(llambda))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
if (!is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
+
if (!is.list(eshape1)) eshape1 = list()
if (!is.list(eshape2)) eshape2 = list()
if (!is.list(elambda)) elambda = list()
@@ -5900,11 +5991,11 @@ rlino = function(n, shape1, shape2, lambda=1) {
}
}), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
- .ishape1=ishape1, .ishape2=ishape2, .ilambda=ilambda ))),
+ .ishape1=ishape1, .ishape2=ishape2, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
+ sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
rep(as.numeric(NA), length = nrow(eta))
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
@@ -5915,20 +6006,20 @@ rlino = function(n, shape1, shape2, lambda=1) {
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
+ sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dlino(y, shape1=sh1, shape2=sh2, lambda=lambda, log = TRUE))
+ sum(w * dlino(y, shape1 = sh1, shape2 = sh2, lambda=lambda, log = TRUE))
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
.eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
vfamily = c("lino"),
deriv = eval(substitute(expression({
- sh1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[,3], .llambda, earg = .elambda)
+ sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
temp1 = log1p(-(1-lambda) * y)
temp2 = digamma(sh1+sh2)
dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
@@ -5964,524 +6055,640 @@ rlino = function(n, shape1, shape2, lambda=1) {
}
- genbetaII= function(link.a = "loge",
- link.scale = "loge",
- link.p = "loge",
- link.q = "loge",
- earg.a = list(), earg.scale = list(),
- earg.p = list(), earg.q = list(),
- init.a = NULL,
- init.scale = NULL,
- init.p=1.0,
- init.q=1.0,
- zero = NULL)
-{
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.p) != "character" && mode(link.p) != "name")
- link.p = as.character(substitute(link.p))
- if (mode(link.q) != "character" && mode(link.q) != "name")
- link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.p)) earg.p = list()
- if (!is.list(earg.q)) earg.q = list()
- new("vglmff",
- blurb = c("Generalized Beta II distribution\n\n",
- "Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("p", link.p, earg = earg.p), ", ",
- namesof("q", link.q, earg = earg.q), "\n",
- "Mean: scale*gamma(p + 1/a)*gamma(q - 1/a)/(gamma(p)*gamma(q))"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("p", .link.p, earg = .earg.p, tag = FALSE),
- namesof("q", .link.q, earg = .earg.q, tag = FALSE))
- if (!length( .init.a) || !length( .init.scale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length( .init.q)) .init.q else 1
- xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
- fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
- }
- if (!length(etastart)) {
- aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
- length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
- exp(fit0$coef[1]), length = n)
- qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
- parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(parg, .link.p, earg = .earg.p),
- theta2eta(qq, .link.q, earg = .earg.q))
- }
- }), list( .link.a = link.a, .link.scale = link.scale,
- .link.p = link.p, .link.q = link.q,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .init.a = init.a, .init.scale = init.scale,
- .init.p = init.p, .init.q=init.q ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
- scale * gamma(parg + 1/aa) *
- gamma(qq - 1/aa) / (gamma(parg)*gamma(qq))
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale,
- p = .link.p, q= .link.q)
- misc$earg = list(a = .earg.a, scale = .earg.scale,
- p = .earg.p, q= .earg.q)
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- -lbeta(parg, qq) - (parg+qq)*log1p((y/scale)^aa)))
- }
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
- vfamily = c("genbetaII"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,4], .link.q, earg = .earg.q)
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3 = digamma(parg + qq)
- temp3a = digamma(parg)
- temp3b = digamma(qq)
- temp4 = log1p(temp2)
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dp = aa * temp1 + temp3 - temp3a - temp4
- dl.dq = temp3 - temp3b - temp4
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
- dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta,
- dl.dp * dp.deta,
- dl.dq * dq.deta )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
- weight = eval(substitute(expression({
- temp5 = trigamma(parg + qq)
- temp5a = trigamma(parg)
- temp5b = trigamma(qq)
- ed2l.da = (1 + parg+qq + parg * qq * (temp5a + temp5b +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dp = temp5a - temp5
- ed2l.dq = temp5b - temp5
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- ed2l.dap= -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
- ed2l.daq= -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
- ed2l.dscalep = aa * qq / (scale*(parg+qq))
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- ed2l.dpq = -temp5
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==4 means 10=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
- wz[,iam(4,4,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
- wz[,iam(1,4,M)] = ed2l.daq * da.deta * dq.deta
- wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz[,iam(2,4,M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz[,iam(3,4,M)] = ed2l.dpq * dp.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))))
+ genbetaII = function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ lshape3.q = "loge",
+ eshape1.a = list(), escale = list(),
+ eshape2.p = list(), eshape3.q = list(),
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
+ ishape3.q = 1.0,
+ zero = NULL)
+{
+
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
+ lshape2.p = as.character(substitute(lshape2.p))
+ if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
+ lshape3.q = as.character(substitute(lshape3.q))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape2.p)) eshape2.p = list()
+ if (!is.list(eshape3.q)) eshape3.q = list()
+
+ new("vglmff",
+ blurb = c("Generalized Beta II distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
+ "gamma(shape3.q - 1/shape1.a) / ",
+ "(gamma(shape2.p) * gamma(shape3.q))"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ predictors.names =
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
+ xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else 1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]), length = n)
+ qq = rep(if (length( .ishape3.q )) .ishape3.q else 1.0, leng = n)
+ parg = rep(if (length( .ishape2.p )) .ishape2.p else 1.0, leng = n)
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape2.p = ishape2.p, .ishape3.q = ishape3.q ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans[parg <= 0] = NA
+ ans[qq <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale,
+ shape2.p = .lshape2.p, shape3.q = .lshape3.q)
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale,
+ shape2.p = .eshape2.p, shape3.q = .eshape3.q)
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * (log(aa) + (aa * parg - 1) * log(y) -
+ aa * parg * log(scale) +
+ - lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa)))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ vfamily = c("genbetaII"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3 = digamma(parg + qq)
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+ temp4 = log1p(temp2)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ dl.dp = aa * temp1 + temp3 - temp3a - temp4
+ dl.dq = temp3 - temp3b - temp4
+
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta,
+ dl.dp * dp.deta,
+ dl.dq * dq.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ weight = eval(substitute(expression({
+ temp5 = trigamma(parg + qq)
+ temp5a = trigamma(parg)
+ temp5b = trigamma(qq)
+
+ ed2l.da = (1 + parg+qq + parg * qq * (temp5a + temp5b +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dp = temp5a - temp5
+ ed2l.dq = temp5b - temp5
+ ed2l.dascale = (parg - qq - parg * qq *
+ (temp3a -temp3b)) / (scale*(1 + parg+qq))
+ ed2l.dap = -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
+ ed2l.daq = -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
+ ed2l.dscalep = aa * qq / (scale*(parg+qq))
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ ed2l.dpq = -temp5
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==4 means 10=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
+ wz[,iam(4,4,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
+ wz[,iam(1,4,M)] = ed2l.daq * da.deta * dq.deta
+ wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[,iam(2,4,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[,iam(3,4,M)] = ed2l.dpq * dp.deta * dq.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))))
}
-rsinmad <- function(n, a, scale = 1, q.arg)
- qsinmad(runif(n), a, scale, q.arg)
+rsinmad <- function(n, shape1.a, scale = 1, shape3.q)
+ qsinmad(runif(n), shape1.a, scale = scale, shape3.q)
-rlomax <- function(n, scale = 1, q.arg)
- rsinmad(n, a = 1, scale, q.arg)
-rfisk <- function(n, a, scale = 1)
- rsinmad(n, a, scale, q.arg = 1)
+rlomax <- function(n, scale = 1, shape3.q)
+ rsinmad(n, shape1.a = 1, scale = scale, shape3.q)
-rparalogistic <- function(n, a, scale = 1)
- rsinmad(n, a, scale, a)
-rdagum <- function(n, a, scale = 1, p.arg)
- qdagum(runif(n), a = a, scale = scale, p.arg = p.arg)
+rfisk <- function(n, shape1.a, scale = 1)
+ rsinmad(n, shape1.a, scale = scale, shape3.q = 1)
-rinvlomax <- function(n, scale = 1, p.arg)
- rdagum(n, a = 1, scale, p.arg)
-rinvparalogistic <- function(n, a, scale = 1)
- rdagum(n, a, scale, a)
+rparalogistic <- function(n, shape1.a, scale = 1)
+ rsinmad(n, shape1.a, scale = scale, shape1.a)
+rdagum <- function(n, shape1.a, scale = 1, shape2.p)
+ qdagum(runif(n), shape1.a = shape1.a, scale = scale, shape2.p = shape2.p)
-qsinmad <- function(p, a, scale = 1, q.arg) {
- bad = (p < 0) | (p > 1)
- ans = NA * p
- a = rep(a, len = length(p))[!bad]
- scale = rep(scale, len = length(p))[!bad]
- q = rep(q.arg, len = length(p))[!bad]
- xx = p[!bad]
- ans[!bad] = scale* ((1 - xx)^(-1/q) - 1)^(1/a)
- ans
+rinvlomax <- function(n, scale = 1, shape2.p)
+ rdagum(n, shape1.a = 1, scale = scale, shape2.p)
+
+
+rinvparalogistic <- function(n, shape1.a, scale = 1)
+ rdagum(n, shape1.a, scale = scale, shape1.a)
+
+
+
+
+qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
+ bad = (p < 0) | (p > 1)
+ ans = NA * p
+ shape1.a = rep(shape1.a, length.out = length(p))[!bad]
+ scale = rep(scale, length.out = length(p))[!bad]
+ q = rep(shape3.q, length.out = length(p))[!bad]
+ xx = p[!bad]
+ ans[!bad] = scale * ((1 - xx)^(-1/q) - 1)^(1/shape1.a)
+ ans
}
-qlomax <- function(p, scale = 1, q.arg)
- qsinmad(p, a = 1, scale, q.arg)
+qlomax <- function(p, scale = 1, shape3.q)
+ qsinmad(p, shape1.a = 1, scale = scale, shape3.q)
-qfisk <- function(p, a, scale = 1)
- qsinmad(p, a, scale, q.arg = 1)
+qfisk <- function(p, shape1.a, scale = 1)
+ qsinmad(p, shape1.a, scale = scale, shape3.q = 1)
-qparalogistic <- function(p, a, scale = 1)
- qsinmad(p, a, scale, a)
+qparalogistic <- function(p, shape1.a, scale = 1)
+ qsinmad(p, shape1.a, scale = scale, shape1.a)
-qdagum <- function(p, a, scale = 1, p.arg) {
+qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
- LLL <- max(length(p), length(a), length(scale), length(p.arg))
- p <- rep(p, length.out = LLL)
- a <- rep(a, length.out = LLL)
- Scale <- rep(scale, length.out = LLL)
- p.arg <- rep(p.arg, length.out = LLL)
+ LLL = max(length(p), length(shape1.a), length(scale), length(shape2.p))
+ if (length(p) != LLL)
+ p <- rep(p, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ Scale <- rep(scale, length.out = LLL)
+ if (length(shape2.p) != LLL)
+ shape2.p <- rep(shape2.p, length.out = LLL)
- bad = (p < 0) | (p > 1) | (Scale <= 0)
- ans = NA * p
- ans[!bad] = Scale[!bad] * (p[!bad]^(-1/p.arg[!bad]) - 1)^(-1/a[!bad])
- ans
+ bad = (p < 0) | (p > 1) | (Scale <= 0)
+ ans = NA * p
+ ans[!bad] = Scale[!bad] *
+ (p[!bad]^(-1/shape2.p[!bad]) - 1)^(-1/shape1.a[!bad])
+ ans
}
-qinvlomax <- function(p, scale = 1, p.arg)
- qdagum(p, a = 1, scale, p.arg)
+qinvlomax <- function(p, scale = 1, shape2.p)
+ qdagum(p, shape1.a = 1, scale, shape2.p)
-qinvparalogistic <- function(p, a, scale = 1)
- qdagum(p, a, scale, a)
+qinvparalogistic <- function(p, shape1.a, scale = 1)
+ qdagum(p, shape1.a, scale, shape1.a)
-psinmad <- function(q, a, scale = 1, q.arg) {
- zero = q <= 0
- a = rep(a, len = length(q))[!zero]
- scale = rep(scale, len = length(q))[!zero]
- q.arg = rep(q.arg, len = length(q))[!zero]
- ans = 0 * q
- xx = q[!zero]
- ans[!zero] = 1 - (1 + (xx/scale)^a)^(-q.arg)
- ans
+
+psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
+
+
+ LLL = max(length(q), length(shape1.a), length(scale), length(shape3.q))
+ if (length(q) != LLL)
+ q <- rep(q, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape3.q) != LLL)
+ shape3.q <- rep(shape3.q, length.out = LLL)
+
+
+ notpos = (q <= 0)
+ Shape1.a = shape1.a[!notpos]
+ Scale = scale[!notpos]
+ Shape3.q = shape3.q[!notpos]
+ QQ = q[!notpos]
+
+ ans = 0 * q
+ ans[!notpos] = 1 - (1 + (QQ / Scale)^Shape1.a)^(-Shape3.q)
+ ans
}
-plomax = function(q, scale = 1, q.arg)
- psinmad(q, a = 1, scale, q.arg)
-pfisk = function(q, a, scale = 1)
- psinmad(q, a, scale, q.arg=1)
+plomax = function(q, scale = 1, shape3.q)
+ psinmad(q, shape1.a = 1, scale, shape3.q)
-pparalogistic = function(q, a, scale = 1)
- psinmad(q, a, scale, a)
+pfisk = function(q, shape1.a, scale = 1)
+ psinmad(q, shape1.a, scale, shape3.q = 1)
-pdagum <- function(q, a, scale = 1, p.arg) {
- zero <- q <= 0
- a <- rep(a, len = length(q))[!zero]
- scale <- rep(scale, len = length(q))[!zero]
- p <- rep(p.arg, len = length(q))[!zero]
- ans <- 0 * q
- xx <- q[!zero]
- ans[!zero] <- (1 + (xx/scale)^(-a))^(-p)
- ans
+pparalogistic = function(q, shape1.a, scale = 1)
+ psinmad(q, shape1.a, scale, shape1.a)
+
+
+
+pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
+
+
+ LLL = max(length(q), length(shape1.a), length(scale), length(shape2.p))
+ if (length(q) != LLL)
+ q <- rep(q, length.out = LLL)
+ if (length(shape1.a) != LLL)
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ if (length(scale) != LLL)
+ scale <- rep(scale, length.out = LLL)
+ if (length(shape2.p) != LLL)
+ shape2.p <- rep(shape2.p, length.out = LLL)
+
+ notpos = (q <= 0)
+ Shape1.a = shape1.a[!notpos]
+ Scale = scale[!notpos]
+ Shape2.p = shape2.p[!notpos]
+ QQ = q[!notpos]
+
+ ans <- 0 * q
+ ans[!notpos] <- (1 + (QQ/Scale)^(-Shape1.a))^(-Shape2.p)
+ ans
}
-pinvlomax <- function(q, scale = 1, p.arg)
- pdagum(q, a = 1, scale, p.arg)
-pinvparalogistic <- function(q, a, scale = 1)
- pdagum(q, a, scale, a)
+pinvlomax <- function(q, scale = 1, shape2.p)
+ pdagum(q, shape1.a = 1, scale, shape2.p)
+pinvparalogistic <- function(q, shape1.a, scale = 1)
+ pdagum(q, shape1.a, scale, shape1.a)
-dsinmad <- function(x, a, scale = 1, q.arg, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- LLL <- max(length(x), length(a), length(scale), length(q.arg))
- x <- rep(x, len = LLL);
- a <- rep(a, len = LLL)
- scale <- rep(scale, len = LLL);
- q.arg <- rep(q.arg, len = LLL)
- Loglik <- rep(log(0), len = LLL)
- xok <- (x > 0) # Avoids evaluating log(x) if x is negative.
- Loglik[xok] <- log(a[xok]) + log(q.arg[xok]) + (a[xok]-1)*log(x[xok]) -
- a[xok]*log(scale[xok]) -
- (1+q.arg[xok]) * log1p((x[xok]/scale[xok])^a[xok])
- if (log.arg) Loglik else exp(Loglik)
+
+dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
+
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(shape1.a),
+ length(scale), length(shape3.q))
+ x <- rep(x, length.out = LLL);
+ shape1.a <- rep(shape1.a, length.out = LLL)
+ scale <- rep(scale, length.out = LLL);
+ shape3.q <- rep(shape3.q, length.out = LLL)
+
+ Loglik <- rep(log(0), length.out = LLL)
+ xok <- (x > 0) # Avoids evaluating log(x) if x is negative.
+ Loglik[xok] <- log(shape1.a[xok]) + log(shape3.q[xok]) +
+ (shape1.a[xok]-1) * log(x[xok]) -
+ shape1.a[xok] * log(scale[xok]) -
+ (1 + shape3.q[xok]) * log1p((x[xok]/scale[xok])^shape1.a[xok])
+ if (log.arg) Loglik else exp(Loglik)
}
-dlomax <- function(x, scale = 1, q.arg, log = FALSE)
- dsinmad(x, a = 1, scale, q.arg, log = log)
-dfisk <- function(x, a, scale = 1, log = FALSE)
- dsinmad(x, a, scale, q.arg = 1, log = log)
+dlomax <- function(x, scale = 1, shape3.q, log = FALSE)
+ dsinmad(x, shape1.a = 1, scale, shape3.q, log = log)
-dparalogistic <- function(x, a, scale = 1, log = FALSE)
- dsinmad(x, a, scale, a, log = log)
+dfisk <- function(x, shape1.a, scale = 1, log = FALSE)
+ dsinmad(x, shape1.a, scale, shape3.q = 1, log = log)
-ddagum <- function(x, a, scale = 1, p.arg, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
+ dsinmad(x, shape1.a, scale, shape1.a, log = log)
+
- LLL = max(length(x), length(a), length(scale), length(p.arg))
- x = rep(x, len = LLL); a = rep(a, len = LLL)
- scale = rep(scale, len = LLL); p.arg = rep(p.arg, len = LLL)
- Loglik = rep(log(0), len = LLL)
- xok = (x > 0) # Avoids evaluating log(x) if x is negative.
- Loglik[xok] = log(a[xok]) + log(p.arg[xok]) +
- (a[xok]*p.arg[xok]-1)*log(x[xok]) -
- a[xok]*p.arg[xok]*log(scale[xok]) -
- (1+p.arg[xok]) * log1p((x[xok]/scale[xok])^a[xok])
- Loglik[p.arg <= 0] = NaN
- if (log.arg) Loglik else exp(Loglik)
+ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL = max(length(x), length(shape1.a), length(scale), length(shape2.p))
+ x = rep(x, length.out = LLL);
+ shape1.a = rep(shape1.a, length.out = LLL)
+ scale = rep(scale, length.out = LLL);
+ shape2.p = rep(shape2.p, length.out = LLL)
+ Loglik = rep(log(0), length.out = LLL)
+ xok = (x > 0) # Avoids evaluating log(x) if x is negative.
+ Loglik[xok] = log(shape1.a[xok]) +
+ log(shape2.p[xok]) +
+ (shape1.a[xok]*shape2.p[xok]-1)*log(x[xok]) -
+ shape1.a[xok]*shape2.p[xok]*log(scale[xok]) -
+ (1+shape2.p[xok]) * log1p((x[xok]/scale[xok])^shape1.a[xok])
+ Loglik[shape2.p <= 0] = NaN
+ if (log.arg) Loglik else exp(Loglik)
}
-dinvlomax <- function(x, scale = 1, p.arg, log = FALSE)
- ddagum(x, a = 1, scale, p.arg, log = log)
-dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
- ddagum(x, a, scale, a, log = log)
+dinvlomax <- function(x, scale = 1, shape2.p, log = FALSE)
+ ddagum(x, shape1.a = 1, scale, shape2.p, log = log)
+
+
+dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
+ ddagum(x, shape1.a, scale, shape1.a, log = log)
+
+
+
+ sinmad = function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape3.q = "loge",
+ eshape1.a = list(), escale = list(), eshape3.q = list(),
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape3.q = 1.0,
+ zero = NULL)
+{
+
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
+ lshape3.q = as.character(substitute(lshape3.q))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape3.q)) eshape3.q = list()
+
+ new("vglmff",
+ blurb = c("Singh-Maddala distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(shape3.q - 1/shape1.a) / ",
+ "gamma(shape3.q)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names =
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+ parg = 1
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
+ xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
+ fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
+ }
+
+
+
+ if (!length(etastart)) {
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else 1/fit0$coef[2],
+ length.out = n)
+ scale = rep(if (length( .iscale )) .iscale else exp(fit0$coef[1]),
+ length.out = n)
+ qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0,
+ length.out = n)
+
+
+ outOfRange = (aa * qq <= 1)
+ qq[outOfRange] = 1 / aa[outOfRange] + 1
+
+
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape3.q = ishape3.q ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans[qq <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+
+ last = eval(substitute(expression({
+ misc$link =
+ c(shape1.a = .lshape1.a, scale = .lscale, shape3.q = .lshape3.q)
+ misc$earg =
+ list(shape1.a = .eshape1.a, scale = .escale, shape3.q = .eshape3.q)
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .earg)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dsinmad(x = y, shape1.a = aa, scale = scale,
+ shape3.q = qq, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape3.q = lshape3.q,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q ))),
+ vfamily = c("sinmad"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1 / aa + parg * temp1 - (parg + qq) * temp1 / (1 + 1 / temp2)
+ dl.dscale = (aa / scale) * (-parg + (parg + qq) / (1 + 1 / temp2))
+ dl.dq = digamma(parg + qq) - temp3b - log1p(temp2)
+
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
+
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta,
+ dl.dq * dq.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))),
+ weight = eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dq = 1/qq^2
+ ed2l.dascale = (parg - qq - parg*qq *
+ (temp3a -temp3b)) / (scale*(1 + parg+qq))
+ ed2l.daq = -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
+ ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[,iam(1,3,M)] = ed2l.daq * da.deta * dq.deta
+ wz[,iam(2,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape3.q = eshape3.q,
+ .lshape3.q = lshape3.q ))))
+}
- sinmad = function(link.a = "loge",
- link.scale = "loge",
- link.q = "loge",
- earg.a = list(), earg.scale = list(), earg.q = list(),
- init.a = NULL,
- init.scale = NULL,
- init.q=1.0,
+ dagum = function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ eshape1.a = list(), escale = list(), eshape2.p = list(),
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
zero = NULL)
{
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.q) != "character" && mode(link.q) != "name")
- link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
+ lshape2.p = as.character(substitute(lshape2.p))
+
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape2.p)) eshape2.p = list()
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.q)) earg.q = list()
new("vglmff",
- blurb = c("Singh-Maddala distribution\n\n",
+ blurb = c("Dagum distribution\n\n",
"Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("q", link.q, earg = earg.q), "\n",
- "Mean: scale*gamma(1 + 1/a)*gamma(q - 1/a)/gamma(q)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("q", .link.q, earg = .earg.q, tag = FALSE))
- parg = 1
-
- if (!length( .init.a) || !length( .init.scale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length( .init.q)) .init.q else 1
- xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
- fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
- }
-
- if (!length(etastart)) {
- aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
- length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
- exp(fit0$coef[1]), length = n)
- qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(qq, .link.q, earg = .earg.q))
- }
- }), list( .link.a = link.a, .link.scale = link.scale,
- .link.q = link.q,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.q = earg.q,
- .init.a = init.a, .init.scale = init.scale,
- .init.q=init.q ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
- scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.q = earg.q,
- .link.q = link.q ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale, q= .link.q)
- misc$earg = list(a = .earg.a, scale = .earg.scale, q= .earg.q)
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.q = earg.q,
- .link.q = link.q ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = 1
- qq = eta2theta(eta[,3], .link.q, earg = .earg)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dsinmad(x = y, a=aa, scale = scale, q.arg=qq, log = TRUE))
- }
- }, list( .link.a = link.a, .link.scale = link.scale, .link.q = link.q,
- .earg.a = earg.a, .earg.scale = earg.scale, .earg.q = earg.q ))),
- vfamily = c("sinmad"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = 1
- qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
-
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
-
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- dl.dq = digamma(parg + qq) - temp3b - log1p(temp2)
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta,
- dl.dq * dq.deta )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.q = earg.q,
- .link.q = link.q ))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dq = 1/qq^2
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- ed2l.daq= -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
- ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz[,iam(1,3,M)] = ed2l.daq * da.deta * dq.deta
- wz[,iam(2,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz = c(w) * wz
- wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.q = earg.q,
- .link.q = link.q ))))
-}
-
-
- dagum = function(link.a = "loge",
- link.scale = "loge",
- link.p = "loge",
- earg.a = list(), earg.scale = list(), earg.p = list(),
- init.a = NULL,
- init.scale = NULL,
- init.p=1.0,
- zero = NULL)
-{
-
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.p) != "character" && mode(link.p) != "name")
- link.p = as.character(substitute(link.p))
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.p)) earg.p = list()
-
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
-
- new("vglmff",
- blurb = c("Dagum distribution\n\n",
- "Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("p", link.p, earg = earg.p), "\n",
- "Mean: scale*gamma(p + 1/a)*gamma(1 - 1/a)/gamma(p)"),
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
+ "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a) / ",
+ "gamma(shape2.p)"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
@@ -6490,67 +6697,74 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
stop("response must be a vector or a one-column matrix")
predictors.names <-
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("p", .link.p, earg = .earg.p, tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
- if (!length( .init.a) || !length( .init.scale )) {
+ if (!length( .ishape1.a) || !length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length( .init.p)) .init.p else 1
- xvec = log( qvec^(-1/ init.p ) - 1 )
+ ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
+ xvec = log( qvec^(-1/ ishape2.p ) - 1 )
fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
- aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
+ parg = rep(if (length( .ishape2.p)) .ishape2.p else 1.0, length = n)
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else -1/fit0$coef[2],
length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
+ scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]), length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(parg, .link.p, earg = .earg.p))
- }
- }), list( .link.a = link.a, .link.scale = link.scale,
- .link.p = link.p,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p,
- .init.a = init.a, .init.scale = init.scale,
- .init.p = init.p ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
- qq = 1
- scale * gamma(parg + 1/aa) *
- gamma(qq - 1/aa) / (gamma(parg)*gamma(qq))
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p,
- .link.p = link.p ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale, p = .link.p )
- misc$earg = list(a = .earg.a, scale = .earg.scale, p = .earg.p)
- }), list( .link.a = link.a, .link.scale = link.scale, .link.p = link.p,
- .earg.a = earg.a, .earg.scale = earg.scale, .earg.p = earg.p ))),
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape2.p = ishape2.p ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
+ qq = 1
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans[parg <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale, p = .lshape2.p )
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale, p = .eshape2.p )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale, .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale, .eshape2.p = eshape2.p ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * ddagum(x = y, a = aa, scale = scale, p.arg = parg,
- log = TRUE))
+ sum(w * ddagum(x = y, shape1.a = aa, scale = scale,
+ shape2.p = parg, log = TRUE))
}
- }, list( .link.a = link.a, .link.scale = link.scale, .link.p = link.p,
- .earg.a = earg.a, .earg.scale = earg.scale, .earg.p = earg.p ))),
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale, .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale, .eshape2.p = eshape2.p ))),
vfamily = c("dagum"),
deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,3], .link.p, earg = .earg.p)
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = 1
temp1 = log(y/scale)
@@ -6561,16 +6775,16 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta,
dl.dp * dp.deta )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p,
- .link.p = link.p ))),
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
@@ -6590,38 +6804,43 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = c(w) * wz
wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .earg.p = earg.p,
- .link.p = link.p ))))
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))))
}
- betaII = function(link.scale = "loge", link.p = "loge", link.q = "loge",
- earg.scale = list(), earg.p = list(), earg.q = list(),
- init.scale = NULL, init.p=1.0, init.q=1.0, zero = NULL)
-{
+ betaII = function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+ escale = list(), eshape2.p = list(), eshape3.q = list(),
+ iscale = NULL, ishape2.p = 2, ishape3.q = 2,
+ zero = NULL) {
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.p) != "character" && mode(link.p) != "name")
- link.p = as.character(substitute(link.p))
- if (mode(link.q) != "character" && mode(link.q) != "name")
- link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.p)) earg.p = list()
- if (!is.list(earg.q)) earg.q = list()
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
+ lshape2.p = as.character(substitute(lshape2.p))
+ if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
+ lshape3.q = as.character(substitute(lshape3.q))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape2.p)) eshape2.p = list()
+ if (!is.list(eshape3.q)) eshape3.q = list()
new("vglmff",
blurb = c("Beta II distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("p", link.p, earg = earg.p), ", ",
- namesof("q", link.q, earg = earg.q), "\n",
- "Mean: scale*gamma(p + 1)*gamma(q - 1)/(gamma(p)*gamma(q))"),
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(shape2.p + 1) * ",
+ "gamma(shape3.q - 1) / ",
+ "(gamma(shape2.p) * gamma(shape3.q))"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
@@ -6629,69 +6848,79 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("p", .link.p, earg = .earg.p, tag = FALSE),
- namesof("q", .link.q, earg = .earg.q, tag = FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
- if (!length( .init.scale )) {
+ if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length( .init.q)) .init.q else 1
- xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
+ xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- scale = rep(if (length( .init.scale )) .init.scale else
+ scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]), length = n)
- qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
- parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
- etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(parg, .link.p, earg = .earg.p),
- theta2eta(qq, .link.q, earg = .earg.q))
- }
- }), list( .link.scale = link.scale,
- .link.p = link.p, .link.q = link.q,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .init.scale = init.scale,
- .init.p = init.p, .init.q=init.q ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
- scale*gamma(parg + 1/aa)*gamma(qq-1/aa)/(gamma(parg)*gamma(qq))
- }, list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .link.scale, p = .link.p, q= .link.q)
- misc$earg = list(scale = .earg.scale, p = .earg.p, q= .earg.q)
- }), list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
+ qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0, length = n)
+ parg = rep(if (length( .ishape2.p)) .ishape2.p else 1.0, length = n)
+ etastart = cbind(theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ }
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .iscale = iscale,
+ .ishape2.p = ishape2.p,
+ .ishape3.q = ishape3.q ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = 1
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans[parg <= 0] = NA
+ ans[qq <= 0] = NA
+ ans
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
+ last = eval(substitute(expression({
+ misc$link = c(scale = .lscale, shape2.p = .lshape2.p, shape3.q = .lshape3.q)
+ misc$earg = list(scale = .escale, shape2.p = .eshape2.p, shape3.q = .eshape3.q)
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w*(log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
- (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
- }, list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
+ "not implemented yet") else {
+ sum(w * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ (-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
+ }
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
vfamily = c("betaII"),
deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
- qq = eta2theta(eta[,3], .link.q, earg = .earg.q)
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
@@ -6703,16 +6932,16 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + temp3 - temp3a - temp4
dl.dq = temp3 - temp3b - temp4
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
- dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
+ dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta,
dl.dq * dq.deta )
- }), list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))),
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
weight = eval(substitute(expression({
temp5 = trigamma(parg + qq)
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
@@ -6730,37 +6959,40 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
wz[,iam(2,3,M)] = ed2l.dpq * dp.deta * dq.deta
wz = c(w) * wz
wz
- }), list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p, .earg.q = earg.q,
- .link.p = link.p, .link.q = link.q ))))
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
+ .lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))))
}
- lomax = function(link.scale = "loge",
- link.q = "loge",
- earg.scale = list(), earg.q = list(),
- init.scale = NULL,
- init.q=1.0,
+ lomax = function(lscale = "loge",
+ lshape3.q = "loge",
+ escale = list(), eshape3.q = list(),
+ iscale = NULL,
+ ishape3.q = 2.0,
zero = NULL)
{
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.q) != "character" && mode(link.q) != "name")
- link.q = as.character(substitute(link.q))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.q)) earg.q = list()
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
+ lshape3.q = as.character(substitute(lshape3.q))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape3.q)) eshape3.q = list()
new("vglmff",
blurb = c("Lomax distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("q", link.q, earg = earg.q), "\n",
- "Mean: scale/(q-1)"),
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale / (shape3.q - 1)"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
@@ -6768,66 +7000,80 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("q", .link.q, earg = .earg.q, tag = FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
aa = parg = 1
- if (!length( .init.scale )) {
+ if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.q = if (length( .init.q)) .init.q else 1
- xvec = log( (1-qvec)^(-1/ init.q ) - 1 )
+ ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
+ xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- qq = rep(if (length( .init.q)) .init.q else 1.0, length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
+ qq = rep(if (length( .ishape3.q)) .ishape3.q else 1.0, length = n)
+ scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]), length = n)
- etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(qq, .link.q, earg = .earg.q))
+ etastart = cbind(theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
- }), list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q,
- .init.scale = init.scale, .init.q=init.q ))),
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q,
+ .iscale = iscale, .ishape3.q = ishape3.q ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
- scale/(qq-1)
- }, list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q ))),
+ aa = 1
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ parg = 1
+ qq = eta2theta(eta[, 2], .lshape3.q , earg = .eshape3.q )
+
+
+
+
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans[qq <= 0] = NA
+ ans
+ }, list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
last = eval(substitute(expression({
- misc$link = c(scale = .link.scale, q= .link.q)
- misc$earg = list(scale = .earg.scale, q= .earg.q)
- }), list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q ))),
+ misc$link = c(scale = .lscale, shape3.q = .lshape3.q)
+ misc$earg = list(scale = .escale, shape3.q = .eshape3.q)
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
parg = 1
- qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
+ qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dlomax(x = y, scale = scale, q.arg=qq, log = TRUE))
+ sum(w * dlomax(x = y, scale = scale,
+ shape3.q = qq, log = TRUE))
}
- }, list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q ))),
+ }, list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
vfamily = c("lomax"),
deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
parg = 1
- qq = eta2theta(eta[,2], .link.q, earg = .earg.q)
+ qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
temp2 = (y/scale)^aa
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dq = digamma(parg + qq) - digamma(qq) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dq.deta = dtheta.deta(qq, .link.q, earg = .earg.q)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.dscale * dscale.deta,
dl.dq * dq.deta )
- }), list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q ))),
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))),
weight = eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dq = 1/qq^2
@@ -6838,88 +7084,99 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
wz[,iam(1,2,M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = c(w) * wz
wz
- }), list( .link.scale = link.scale, .link.q = link.q,
- .earg.scale = earg.scale, .earg.q = earg.q ))))
+ }), list( .lscale = lscale, .lshape3.q = lshape3.q,
+ .escale = escale, .eshape3.q = eshape3.q ))))
}
- fisk = function(link.a = "loge",
- link.scale = "loge",
- earg.a = list(), earg.scale = list(),
- init.a = NULL,
- init.scale = NULL,
+ fisk = function(lshape1.a = "loge",
+ lscale = "loge",
+ eshape1.a = list(), escale = list(),
+ ishape1.a = NULL,
+ iscale = NULL,
zero = NULL)
{
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
new("vglmff",
blurb = c("Fisk distribution\n\n",
"Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), "\n",
- "Mean: scale * gamma(1 + 1/a) * gamma(1 - 1/a)"),
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a)"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
predictors.names =
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
qq = parg = 1
- if (!length( .init.scale )) {
+ if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
xvec = log( 1/qvec - 1 )
fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else -1/fit0$coef[2],
length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
+ scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]), length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale))
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale))
}
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .init.a = init.a, .init.scale = init.scale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- qq = 1
- scale*gamma(1 + 1/aa)*gamma(1-1/aa)
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale)
- misc$earg = list(a = .earg.a, scale = .earg.scale)
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale
- ))),
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = 1
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale)
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale)
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
parg = qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dfisk(x = y, a=aa, scale = scale, log = TRUE))
+ sum(w * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
}
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
vfamily = c("fisk"),
deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
parg = qq = 1
temp1 = log(y/scale)
@@ -6929,12 +7186,12 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
@@ -6948,33 +7205,36 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
wz = c(w) * wz
wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))))
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))))
}
- invlomax = function(link.scale = "loge",
- link.p = "loge",
- earg.scale = list(), earg.p = list(),
- init.scale = NULL,
- init.p=1.0,
+ invlomax = function(lscale = "loge",
+ lshape2.p = "loge",
+ escale = list(), eshape2.p = list(),
+ iscale = NULL,
+ ishape2.p = 1.0,
zero = NULL)
{
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (mode(link.p) != "character" && mode(link.p) != "name")
- link.p = as.character(substitute(link.p))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.scale)) earg.scale = list()
- if (!is.list(earg.p)) earg.p = list()
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
+ if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
+ lshape2.p = as.character(substitute(lshape2.p))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.list(escale)) escale = list()
+ if (!is.list(eshape2.p)) eshape2.p = list()
new("vglmff",
blurb = c("Inverse Lomax distribution\n\n",
"Links: ",
- namesof("scale", link.scale, earg = earg.scale), ", ",
- namesof("p", link.p, earg = earg.p), "\n",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
"Mean: does not exist"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
@@ -6983,70 +7243,75 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names =
- c(namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE),
- namesof("p", .link.p, earg = .earg.p, tag = FALSE))
+ c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
qq = aa = 1
- if (!length( .init.scale )) {
+ if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length( .init.p)) .init.p else 1
- xvec = log( qvec^(-1/ init.p ) - 1 )
+ ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
+ xvec = log( qvec^(-1/ ishape2.p ) - 1 )
fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
}
if (!length(etastart)) {
- scale = rep(if (length( .init.scale )) .init.scale else
+ scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]), length = n)
- parg = rep(if (length( .init.p)) .init.p else 1.0, length = n)
- etastart = cbind(theta2eta(scale, .link.scale, earg = .earg.scale),
- theta2eta(parg, .link.p, earg = .earg.p))
- }
- }), list( .link.scale = link.scale,
- .link.p = link.p,
- .earg.scale = earg.scale,
- .earg.p = earg.p,
- .init.scale = init.scale,
- .init.p = init.p ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- rep(as.numeric(NA), len = nrow(eta))
- }, list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p,
- .link.p = link.p ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .link.scale, p = .link.p )
- misc$earg = list(scale = .earg.scale, p = .earg.p )
- }), list( .link.scale = link.scale,
- .earg.scale = earg.scale,
- .earg.p = earg.p,
- .link.p = link.p ))),
+ parg = rep(if (length( .ishape2.p)) .ishape2.p else 1.0, length = n)
+ etastart = cbind(theta2eta(scale, .lscale, earg = .escale),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ }
+ }), list( .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .escale = escale,
+ .eshape2.p = eshape2.p,
+ .iscale = iscale,
+ .ishape2.p = ishape2.p ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+
+ NA * Scale
+ }, list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
+ last = eval(substitute(expression({
+ misc$link = c(scale = .lscale, shape2.p = .lshape2.p )
+ misc$earg = list(scale = .escale, shape2.p = .eshape2.p )
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .eshape2.p = eshape2.p,
+ .lshape2.p = lshape2.p ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = qq = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
+ aa = 1
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
+ qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dinvlomax(x = y, scale = scale, p.arg=parg, log = TRUE))
+ sum(w * dinvlomax(x = y, scale = scale,
+ shape2.p = parg, log = TRUE))
}
- }, list( .link.scale = link.scale, .link.p = link.p,
- .earg.scale = earg.scale, .earg.p = earg.p ))),
+ }, list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))),
vfamily = c("invlomax"),
deriv = eval(substitute(expression({
aa = qq = 1
- scale = eta2theta(eta[,1], .link.scale, earg = .earg.scale)
- parg = eta2theta(eta[,2], .link.p, earg = .earg.p)
+ scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
temp1 = log(y/scale)
temp2 = (y/scale)^aa
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- dp.deta = dtheta.deta(parg, .link.p, earg = .earg.p)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
c(w) * cbind( dl.dscale * dscale.deta,
dl.dp * dp.deta )
- }), list( .link.scale = link.scale, .link.p = link.p,
- .earg.scale = earg.scale, .earg.p = earg.p ))),
+ }), list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))),
weight = eval(substitute(expression({
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dp = 1/parg^2
@@ -7057,243 +7322,277 @@ dinvparalogistic <- function(x, a, scale = 1, log = FALSE)
wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = c(w) * wz
wz
- }), list( .link.scale = link.scale, .link.p = link.p,
- .earg.scale = earg.scale, .earg.p = earg.p ))))
+ }), list( .lscale = lscale, .lshape2.p = lshape2.p,
+ .escale = escale, .eshape2.p = eshape2.p ))))
}
- paralogistic = function(link.a = "loge",
- link.scale = "loge",
- earg.a = list(), earg.scale = list(),
- init.a = 1.0,
- init.scale = NULL,
+ paralogistic = function(lshape1.a = "loge",
+ lscale = "loge",
+ eshape1.a = list(), escale = list(),
+ ishape1.a = 2,
+ iscale = NULL,
zero = NULL)
{
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
-
- new("vglmff",
- blurb = c("Paralogistic distribution\n\n",
- "Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), "\n",
- "Mean: scale*gamma(1 + 1/a)*gamma(a - 1/a)/gamma(a)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
- parg = 1
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
- if (!length( .init.a) || !length( .init.scale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.a = if (length( .init.a)) .init.a else 1
- xvec = log( (1-qvec)^(-1/ init.a ) - 1 )
- fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
- }
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (!length(etastart)) {
- aa = rep(if (length( .init.a)) .init.a else 1/fit0$coef[2],
- length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
- exp(fit0$coef[1]), length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale))
- }
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .init.a = init.a, .init.scale = init.scale
- ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- qq = aa
- scale*gamma(1 + 1/aa)*gamma(qq-1/aa)/(gamma(qq))
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale)
- misc$earg = list(a = .earg.a, scale = .earg.scale )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = 1
- qq = aa
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dparalogistic(x = y, a=aa, scale = scale, log = TRUE))
- }
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- vfamily = c("paralogistic"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = 1
- qq = aa
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
+ new("vglmff",
+ blurb = c("Paralogistic distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(shape1.a - 1/shape1.a) / gamma(shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ predictors.names =
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ parg = 1
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape1.a = if (length( .ishape1.a)) .ishape1.a else 1
+ xvec = log( (1-qvec)^(-1/ ishape1.a ) - 1 )
+ fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta)
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))))
+ if (!length(etastart)) {
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else 1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale
+ ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
+ parg = 1
+ qq = aa
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale)
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = aa
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dparalogistic(x = y, shape1.a = aa,
+ scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ vfamily = c("paralogistic"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = 1
+ qq = aa
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta)
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ weight = eval(substitute(expression({
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))))
}
- invparalogistic = function(link.a = "loge",
- link.scale = "loge",
- earg.a = list(), earg.scale = list(),
- init.a = 1.0,
- init.scale = NULL,
+ invparalogistic = function(lshape1.a = "loge", lscale = "loge",
+ eshape1.a = list(), escale = list(),
+ ishape1.a = 2, iscale = NULL,
zero = NULL)
{
- if (mode(link.a) != "character" && mode(link.a) != "name")
- link.a = as.character(substitute(link.a))
- if (mode(link.scale) != "character" && mode(link.scale) != "name")
- link.scale = as.character(substitute(link.scale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg.a)) earg.a = list()
- if (!is.list(earg.scale)) earg.scale = list()
+ if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
+ lshape1.a = as.character(substitute(lshape1.a))
+ if (mode(lscale) != "character" && mode(lscale) != "name")
+ lscale = as.character(substitute(lscale))
- new("vglmff",
- blurb = c("Inverse paralogistic distribution\n\n",
- "Links: ",
- namesof("a", link.a, earg = earg.a), ", ",
- namesof("scale", link.scale, earg = earg.scale), "\n",
- "Mean: scale*gamma(a + 1/a)*gamma(1 - 1/a)/gamma(a)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("a", .link.a, earg = .earg.a, tag = FALSE),
- namesof("scale", .link.scale, earg = .earg.scale, tag = FALSE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (!length( .init.a) || !length( .init.scale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- init.p = if (length( .init.a)) .init.a else 1
- xvec = log( qvec^(-1/ init.p ) - 1 )
- fit0 = lsfit(x = xvec, y=log(quantile(y, qvec )))
- }
+ if (!is.list(eshape1.a)) eshape1.a = list()
+ if (!is.list(escale)) escale = list()
- qq = 1
- if (!length(etastart)) {
- aa = rep(if (length( .init.a)) .init.a else -1/fit0$coef[2],
- length = n)
- scale = rep(if (length( .init.scale )) .init.scale else
- exp(fit0$coef[1]), length = n)
- etastart = cbind(theta2eta(aa, .link.a, earg = .earg.a),
- theta2eta(scale, .link.scale, earg = .earg.scale))
- }
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale,
- .init.a = init.a, .init.scale = init.scale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = aa
- qq = 1
- scale * gamma(parg + 1/aa) *
- gamma(qq - 1/aa) / (gamma(parg) * gamma(qq))
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link.a, scale = .link.scale )
- misc$earg = list(a = .earg.a, scale = .earg.scale )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = aa
- qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dinvparalogistic(x = y, a=aa, scale = scale, log = TRUE))
- }
- }, list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- vfamily = c("invparalogistic"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[,1], .link.a, earg = .earg.a)
- scale = eta2theta(eta[,2], .link.scale, earg = .earg.scale)
- parg = aa
- qq = 1
+ new("vglmff",
+ blurb = c("Inverse paralogistic distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(shape1.a + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a)/gamma(shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
+ predictors.names =
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p = if (length( .ishape1.a )) .ishape1.a else 1
+ xvec = log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .link.a, earg = .earg.a)
- dscale.deta = dtheta.deta(scale, .link.scale, earg = .earg.scale)
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta )
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))),
- weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .link.a = link.a, .link.scale = link.scale,
- .earg.a = earg.a, .earg.scale = earg.scale ))))
+ qq = 1
+ if (!length(etastart)) {
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else -1/fit0$coef[2],
+ length = n)
+ scale = rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]), length = n)
+ etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale, earg = .escale))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = aa
+ qq = 1
+
+ ans = Scale * exp(lgamma(parg + 1/aa) +
+ lgamma(qq - 1/aa) - lgamma(parg) - lgamma(qq))
+ ans[parg + 1/aa <= 0] = NA
+ ans[qq - 1/aa <= 0] = NA
+ ans[aa <= 0] = NA
+ ans[Scale <= 0] = NA
+ ans
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale )
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = aa
+ qq = 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * dinvparalogistic(x = y, shape1.a = aa,
+ scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+ vfamily = c("invparalogistic"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ parg = aa
+ qq = 1
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
+ da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
+ dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))),
+
+ weight = eval(substitute(expression({
+ ed2l.da = (1 + parg + qq +
+ parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1 + parg + qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[,iam(1,1,M)] = ed2l.da * da.deta^2
+ wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
+ wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale ))))
}
+
+
+
+
+
+
+
+
if (FALSE)
genlognormal = function(link.sigma = "loge", link.r = "loge",
esigma = list(), er = list(),
@@ -7305,12 +7604,17 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
"matrices). Possibly fundamentally cannot be estimated by IRLS. ",
"Pooling doesn't seem to help")
+
+
if (mode(link.sigma) != "character" && mode(link.sigma) != "name")
link.sigma = as.character(substitute(link.sigma))
if (mode(link.r) != "character" && mode(link.r) != "name")
link.r = as.character(substitute(link.r))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
if (!is.list(esigma)) esigma = list()
if (!is.list(er)) er = list()
@@ -7332,155 +7636,171 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
namesof("r", .link.r, earg = .er, tag = FALSE))
if (!length( .init.sigma) || !length( .init.r)) {
- init.r = if (length( .init.r)) .init.r else 1
- sigma.init = (0.5 * sum(abs(log(y) - mean(log(y )))^init.r))^(1/init.r)
+ init.r = if (length( .init.r)) .init.r else 1
+ sigma.init = (0.5 *
+ sum(abs(log(y) - mean(log(y )))^init.r))^(1/init.r)
}
if (any(y <= 0)) stop("y must be positive")
if (!length(etastart)) {
sigma.init = rep(if (length( .init.sigma)) .init.sigma else
- sigma.init, len = n)
+ sigma.init, length.out = n)
r.init = if (length( .init.r)) .init.r else init.r
- etastart = cbind(mu=rep(log(median(y)), len = n),
+ etastart = cbind(mu=rep(log(median(y)), length.out = n),
sigma=sigma.init,
r = r.init)
}
}), list( .link.sigma = link.sigma, .link.r = link.r,
- .init.sigma=init.sigma, .init.r=init.r ))),
+ .init.sigma = init.sigma, .init.r = init.r ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[,1], "identity", earg = list())
- sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
- r = eta2theta(eta[,3], .link.r, earg = .er)
+ mymu = eta2theta(eta[, 1], "identity", earg = list())
+ sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+ r = eta2theta(eta[, 3], .link.r, earg = .er)
r
}, list( .link.sigma = link.sigma, .link.r = link.r ))),
- last = eval(substitute(expression({
- misc$link = c(loc = "identity", "sigma" = .link.sigma, r = .link.r )
- misc$expected = TRUE
- }), list( .link.sigma = link.sigma, .link.r = link.r ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[,1], "identity", earg = list())
- sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
- r = eta2theta(eta[,3], .link.r, earg = .er)
- temp89 = (abs(log(y)-mymu)/sigma)^r
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
- }, list( .link.sigma = link.sigma, .link.r = link.r ))),
- vfamily = c("genlognormal3"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[,1], "identity", earg = list())
- sigma = eta2theta(eta[,2], .link.sigma, earg = .esigma)
- r = eta2theta(eta[,3], .link.r, earg = .er)
- ss = 1 + 1/r
- temp33 = (abs(log(y)-mymu)/sigma)
- temp33r1 = temp33^(r-1)
- dl.dmymu = temp33r1 * sign(log(y)-mymu) / sigma
- dl.dsigma = (temp33*temp33r1 - 1) / sigma
- dl.dr = (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
- temp33r1 * log(temp33r1) / r
-
- dmymu.deta = dtheta.deta(mymu, "identity", earg = list())
- dsigma.deta = dtheta.deta(sigma, .link.sigma, earg = .esigma)
- dr.deta = dtheta.deta(r, .link.r, earg = .er)
- c(w) * cbind(dl.dmymu * dmymu.deta,
- dl.dsigma * dsigma.deta,
- dl.dr * dr.deta)
- }), list( .link.sigma = link.sigma, .link.r = link.r ))),
- weight = expression({
- wz = matrix(0, n, 6) # 5 will have small savings of 1 column
- B = log(r) + digamma(ss)
- ed2l.dmymu2 = (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
- ed2l.dsigma2 = r / sigma^2
- ed2l.dr2 = (ss * trigamma(ss) + B^2 - 1) / r^3
- ed2l.dsigmar = -B / (r * sigma)
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
- wz[,iam(2,2,M)] = ed2l.dsigma2 * dsigma.deta^2
- wz[,iam(3,3,M)] = ed2l.dr2 * dr.deta^2
- wz[,iam(2,3,M)] = ed2l.dsigmar * dsigma.deta * dr.deta
- wz = c(w) * wz
- wz
- }))
+
+ last = eval(substitute(expression({
+ misc$link = c(loc = "identity", "sigma" = .link.sigma, r = .link.r )
+ misc$expected = TRUE
+ }), list( .link.sigma = link.sigma, .link.r = link.r ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], "identity", earg = list())
+ sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+ r = eta2theta(eta[, 3], .link.r, earg = .er)
+ temp89 = (abs(log(y)-mymu)/sigma)^r
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(w * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
+ }, list( .link.sigma = link.sigma, .link.r = link.r ))),
+ vfamily = c("genlognormal3"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[, 1], "identity", earg = list())
+ sigma = eta2theta(eta[, 2], .link.sigma, earg = .esigma)
+
+ r = eta2theta(eta[, 3], .link.r, earg = .er)
+ ss = 1 + 1/r
+ temp33 = (abs(log(y)-mymu)/sigma)
+ temp33r1 = temp33^(r-1)
+
+ dl.dmymu = temp33r1 * sign(log(y)-mymu) / sigma
+ dl.dsigma = (temp33*temp33r1 - 1) / sigma
+ dl.dr = (log(r) - 1 + digamma(ss) + temp33*temp33r1)/r^2 -
+ temp33r1 * log(temp33r1) / r
+
+ dmymu.deta = dtheta.deta(mymu, "identity", earg = list())
+ dsigma.deta = dtheta.deta(sigma, .link.sigma, earg = .esigma)
+ dr.deta = dtheta.deta(r, .link.r, earg = .er)
+
+ c(w) * cbind(dl.dmymu * dmymu.deta,
+ dl.dsigma * dsigma.deta,
+ dl.dr * dr.deta)
+ }), list( .link.sigma = link.sigma, .link.r = link.r ))),
+ weight = expression({
+ wz = matrix(0, n, 6) # 5 will have small savings of 1 column
+
+ B = log(r) + digamma(ss)
+ ed2l.dmymu2 = (r-1) * gamma(1-1/r) / (sigma^2 * r^(2/r) * gamma(ss))
+ ed2l.dsigma2 = r / sigma^2
+ ed2l.dr2 = (ss * trigamma(ss) + B^2 - 1) / r^3
+ ed2l.dsigmar = -B / (r * sigma)
+
+ wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
+ wz[,iam(2,2,M)] = ed2l.dsigma2 * dsigma.deta^2
+ wz[,iam(3,3,M)] = ed2l.dr2 * dr.deta^2
+ wz[,iam(2,3,M)] = ed2l.dsigmar * dsigma.deta * dr.deta
+ wz = c(w) * wz
+ wz
+ }))
}
+
+
betaprime = function(link = "loge", earg = list(), i1=2, i2 = NULL, zero = NULL)
{
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Beta-prime distribution\n",
- "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
- " y>0, shape1>0, shape2>0\n\n",
- "Links: ",
- namesof("shape1", link, earg = earg), ", ",
- namesof("shape2", link, earg = earg), "\n",
- "Mean: shape1/(shape2-1) provided shape2>1"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- as.matrix(y)) > 1)
- stop("betaprime cannot handle matrix responses yet")
- if (min(y) <= 0)
- stop("response must be positive")
- predictors.names = c(namesof("shape1", .link, earg = .earg, short = TRUE),
- namesof("shape2", .link, earg = .earg, short = TRUE))
- if (is.numeric( .i1) && is.numeric( .i2)) {
- vec = c( .i1, .i2)
- vec = c(theta2eta(vec[1], .link, earg = .earg),
- theta2eta(vec[2], .link, earg = .earg))
- etastart = matrix(vec, n, 2, byrow= TRUE)
- }
- if (!length(etastart)) {
- init1 = if (length( .i1)) rep( .i1, len = n) else rep(1, len = n)
- init2 = if (length( .i2)) rep( .i2, len = n) else 1 + init1 / (y + 0.1)
- etastart = matrix(theta2eta(c(init1, init2), .link, earg = .earg),
- n,2,byrow = TRUE)
- }
- }), list( .link = link, .earg = earg, .i1=i1, .i2=i2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta, .link, earg = .earg)
- ifelse(shapes[,2] > 1, shapes[,1]/(shapes[,2]-1), NA)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .link, shape2 = .link)
- misc$earg = list(shape1 = .earg, shape2 = .earg)
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = eta2theta(eta, .link, earg = .earg)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w *((shapes[,1]-1) * log(y) - lbeta(shapes[,1], shapes[,2]) -
- (shapes[,2]+shapes[,1]) * log1p(y)))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = "betaprime",
- deriv = eval(substitute(expression({
- shapes = eta2theta(eta, .link, earg = .earg)
- dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
- dl.dshapes = cbind(log(y) - log1p(y) - digamma(shapes[,1]) +
- digamma(shapes[,1]+shapes[,2]),
- - log1p(y) - digamma(shapes[,2]) +
- digamma(shapes[,1]+shapes[,2]))
- c(w) * dl.dshapes * dshapes.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- temp2 = trigamma(shapes[,1]+shapes[,2])
- d2l.dshape12 = temp2 - trigamma(shapes[,1])
- d2l.dshape22 = temp2 - trigamma(shapes[,2])
- d2l.dshape1shape2 = temp2
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[,1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[,2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[,1] * dshapes.deta[,2]
+ new("vglmff",
+ blurb = c("Beta-prime distribution\n",
+ "y^(shape1-1) * (1+y)^(-shape1-shape2) / Beta(shape1,shape2),",
+ " y>0, shape1>0, shape2>0\n\n",
+ "Links: ",
+ namesof("shape1", link, earg = earg), ", ",
+ namesof("shape2", link, earg = earg), "\n",
+ "Mean: shape1/(shape2-1) provided shape2>1"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(y <- as.matrix(y)) > 1)
+ stop("betaprime cannot handle matrix responses yet")
+ if (min(y) <= 0)
+ stop("response must be positive")
+ predictors.names =
+ c(namesof("shape1", .link, earg = .earg, short = TRUE),
+ namesof("shape2", .link, earg = .earg, short = TRUE))
+ if (is.numeric( .i1) && is.numeric( .i2)) {
+ vec = c( .i1, .i2)
+ vec = c(theta2eta(vec[1], .link, earg = .earg),
+ theta2eta(vec[2], .link, earg = .earg))
+ etastart = matrix(vec, n, 2, byrow= TRUE)
+ }
+ if (!length(etastart)) {
+ init1 = if (length( .i1))
+ rep( .i1, length.out = n) else rep(1, length.out = n)
+ init2 = if (length( .i2))
+ rep( .i2, length.out = n) else 1 + init1 / (y + 0.1)
+ etastart = matrix(theta2eta(c(init1, init2), .link, earg = .earg),
+ n, 2, byrow = TRUE)
+ }
+ }), list( .link = link, .earg = earg, .i1=i1, .i2=i2 ))),
- -c(w) * wz
- }))
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shapes = eta2theta(eta, .link, earg = .earg)
+ ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2]-1), NA)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(shape1 = .link, shape2 = .link)
+ misc$earg = list(shape1 = .earg, shape2 = .earg)
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes = eta2theta(eta, .link, earg = .earg)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w *((shapes[, 1]-1) * log(y) -
+ lbeta(shapes[, 1], shapes[, 2]) -
+ (shapes[, 2]+shapes[, 1]) * log1p(y)))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "betaprime",
+ deriv = eval(substitute(expression({
+ shapes = eta2theta(eta, .link, earg = .earg)
+ dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
+ dl.dshapes = cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
+ digamma(shapes[, 1]+shapes[, 2]),
+ - log1p(y) - digamma(shapes[, 2]) +
+ digamma(shapes[, 1]+shapes[, 2]))
+ c(w) * dl.dshapes * dshapes.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ temp2 = trigamma(shapes[, 1] + shapes[, 2])
+ d2l.dshape12 = temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 = temp2 - trigamma(shapes[, 2])
+ d2l.dshape1shape2 = temp2
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[,iam(1,2,M)] = d2l.dshape1shape2 *
+ dshapes.deta[, 1] * dshapes.deta[, 2]
+
+ -c(w) * wz
+ }))
}
@@ -7489,45 +7809,46 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
dmaxwell = function(x, a, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- L = max(length(x), length(a))
- x = rep(x, len = L); a = rep(a, len = L);
- logdensity = rep(log(0), len = L)
- xok = (x > 0)
- logdensity[xok] = 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
- 2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
- if (log.arg) logdensity else exp(logdensity)
+ L = max(length(x), length(a))
+ x = rep(x, length.out = L); a = rep(a, length.out = L);
+ logdensity = rep(log(0), length.out = L)
+ xok = (x > 0)
+ logdensity[xok] = 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
+ 2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
+ if (log.arg) logdensity else exp(logdensity)
}
pmaxwell = function(q, a) {
- if (any(a <= 0)) stop("argument 'a' must be positive")
- L = max(length(q), length(a))
- q = rep(q, len = L); a = rep(a, len = L);
- ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
+ if (any(a <= 0))
+ stop("argument 'a' must be positive")
+ L = max(length(q), length(a))
+ q = rep(q, length.out = L); a = rep(a, length.out = L);
+ ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
}
rmaxwell = function(n, a) {
- if (!is.Numeric(n, posit = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (any(a <= 0)) stop("argument 'a' must be positive")
- sqrt(2 * rgamma(n = n, 1.5) / a)
+
+ sqrt(2 * rgamma(n = n, 1.5) / a)
}
qmaxwell = function(p, a) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- if (any(a <= 0)) stop("argument 'a' must be positive")
- N = max(length(p), length(a)); p = rep(p, len = N); a = rep(a, len = N)
- sqrt(2 * qgamma(p=p, 1.5) / a)
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("bad input for argument 'p'")
+ if (any(a <= 0)) stop("argument 'a' must be positive")
+ N = max(length(p), length(a)); p = rep(p, length.out = N); a = rep(a, length.out = N)
+ sqrt(2 * qgamma(p = p, 1.5) / a)
}
+
+
maxwell = function(link = "loge", earg = list()) {
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
@@ -7585,11 +7906,11 @@ dnaka = function(x, shape, scale = 1, log = FALSE) {
rm(log)
L = max(length(x), length(shape), length(scale))
- x = rep(x, len = L)
- shape = rep(shape, len = L)
- scale = rep(scale, len = L);
+ x = rep(x, length.out = L)
+ shape = rep(shape, length.out = L)
+ scale = rep(scale, length.out = L);
- logdensity = rep(log(0), len = L)
+ logdensity = rep(log(0), length.out = L)
xok = (x > 0)
logdensity[xok] = dgamma(x = x[xok]^2, shape = shape[xok],
scale = scale[xok]/shape[xok], log = TRUE) +
@@ -7601,31 +7922,31 @@ dnaka = function(x, shape, scale = 1, log = FALSE) {
pnaka = function(q, shape, scale = 1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
L = max(length(q), length(shape), length(scale))
- q = rep(q, len = L)
- shape = rep(shape, len = L)
- scale = rep(scale, len = L);
+ q = rep(q, length.out = L)
+ shape = rep(shape, length.out = L)
+ scale = rep(scale, length.out = L);
ifelse(q <= 0, 0, pgamma(shape * q^2 / scale, shape))
}
qnaka = function(p, shape, scale = 1, ...) {
- if (!is.Numeric(p, posit = TRUE) || max(p) >= 1)
+ if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
- if (!is.Numeric(shape, posit = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("bad input for argument 'shape'")
- if (!is.Numeric(scale, posit = TRUE))
+ if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
L = max(length(p), length(shape), length(scale))
- p = rep(p, len = L); shape = rep(shape, len = L);
- scale = rep(scale, len = L);
- ans = rep(0.0, len = L)
+ p = rep(p, length.out = L); shape = rep(shape, length.out = L);
+ scale = rep(scale, length.out = L);
+ ans = rep(0.0, length.out = L)
myfun = function(x, shape, scale = 1, p)
- pnaka(q=x, shape = shape, scale = scale) - p
+ pnaka(q = x, shape = shape, scale = scale) - p
for(ii in 1:L) {
EY = sqrt(scale[ii]/shape[ii]) *
gamma(shape[ii]+0.5) / gamma(shape[ii])
@@ -7642,36 +7963,39 @@ qnaka = function(p, shape, scale = 1, ...) {
rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, posit = TRUE, allow = 1))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(shape, posit = TRUE, allow = 1))
- stop("bad input for argument 'shape'")
- if (!is.Numeric(Smallno, posit = TRUE, allow = 1) || Smallno > 0.01 ||
- Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument 'Smallno'")
- ans = rep(0.0, len = n)
-
- ptr1 = 1; ptr2 = 0
- ymax = dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
- shape = shape, scale = scale)
- while(ptr2 < n) {
- EY = sqrt(scale / shape) * gamma(shape + 0.5) / gamma(shape)
- Upper = EY + 5 * scale
- while(pnaka(q=Upper, shape = shape, scale = scale) < 1-Smallno)
- Upper = Upper + scale
- x = runif(2*n, min=0, max=Upper)
- index = runif(2*n, max=ymax) < dnaka(x, shape = shape,
- scale = scale)
- sindex = sum(index)
- if (sindex) {
- ptr2 = min(n, ptr1 + sindex - 1)
- ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
- ptr1 = ptr2 + 1
- }
+
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ if (!is.Numeric(scale, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'scale'")
+ if (!is.Numeric(shape, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'shape'")
+ if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) || Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument 'Smallno'")
+ ans = rep(0.0, length.out = use.n)
+
+ ptr1 = 1; ptr2 = 0
+ ymax = dnaka(x = sqrt(scale * (1 - 0.5 / shape)),
+ shape = shape, scale = scale)
+ while(ptr2 < use.n) {
+ EY = sqrt(scale / shape) * gamma(shape + 0.5) / gamma(shape)
+ Upper = EY + 5 * scale
+ while(pnaka(q = Upper, shape = shape, scale = scale) < 1 - Smallno)
+ Upper = Upper + scale
+ x = runif(2*use.n, min = 0, max = Upper)
+ index = runif(2*use.n, max = ymax) < dnaka(x, shape = shape,
+ scale = scale)
+ sindex = sum(index)
+ if (sindex) {
+ ptr2 = min(use.n, ptr1 + sindex - 1)
+ ans[ptr1:ptr2] = (x[index])[1:(1+ptr2-ptr1)]
+ ptr1 = ptr2 + 1
}
- ans
+ }
+ ans
}
@@ -7686,8 +8010,10 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
lshape = as.character(substitute(lshape))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.null(iscale) && !is.Numeric(iscale, positi = TRUE))
+
+ if (!is.null(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("argument 'iscale' must be a positive number or NULL")
+
if (!is.list(eshape)) eshape = list()
if (!is.list(escale)) escale = list()
@@ -7709,11 +8035,11 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
predictors.names = c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
namesof("scale", .lscale, earg = .escale, tag = FALSE))
if (!length(etastart)) {
- init2 = if (is.Numeric( .iscale, posit = TRUE))
- rep( .iscale, len = n) else rep(1, len = n)
- init1 = if (is.Numeric( .ishape, posit = TRUE))
- rep( .ishape, len = n) else
- rep(init2 / (y+1/8)^2, len = n)
+ init2 = if (is.Numeric( .iscale, positive = TRUE))
+ rep( .iscale, length.out = n) else rep(1, length.out = n)
+ init1 = if (is.Numeric( .ishape, positive = TRUE))
+ rep( .ishape, length.out = n) else
+ rep(init2 / (y+1/8)^2, length.out = n)
etastart = cbind(theta2eta(init1, .lshape, earg = .eshape),
theta2eta(init2, .lscale, earg = .escale))
}
@@ -7721,8 +8047,8 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
.escale = escale, .eshape = eshape,
.ishape = ishape, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
@@ -7734,8 +8060,8 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
.escale = escale, .eshape = eshape ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(w * dnaka(x = y, shape = shape, scale = scale, log = TRUE))
@@ -7743,8 +8069,8 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
.escale = escale, .eshape = eshape ))),
vfamily = c("nakagami"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
dl.dshape = 1 + log(shape/Scale) - digamma(shape) +
2 * log(y) - y^2 / Scale
dl.dscale = -shape/Scale + shape * (y/Scale)^2
@@ -7773,11 +8099,11 @@ drayleigh = function(x, scale = 1, log = FALSE) {
rm(log)
L = max(length(x), length(scale))
- x = rep(x, len = L); scale = rep(scale, len = L);
- logdensity = rep(log(0), len = L)
+ x = rep(x, length.out = L); scale = rep(scale, length.out = L);
+ logdensity = rep(log(0), length.out = L)
xok = (x > 0)
logdensity[xok] = log(x[xok]) - 0.5 * (x[xok]/scale[xok])^2 -
- 2*log(scale[xok])
+ 2 * log(scale[xok])
if (log.arg) logdensity else exp(logdensity)
}
@@ -7786,24 +8112,24 @@ prayleigh = function(q, scale = 1) {
if (any(scale <= 0))
stop("argument 'scale' must be positive")
L = max(length(q), length(scale))
- q = rep(q, len = L); scale = rep(scale, len = L);
+ q = rep(q, length.out = L); scale = rep(scale, length.out = L);
ifelse(q > 0, -expm1(-0.5*(q/scale)^2), 0)
}
qrayleigh = function(p, scale = 1) {
- if (any(scale <= 0))
- stop("argument 'scale' must be positive")
if (any(p <= 0) || any(p >= 1))
stop("argument 'p' must be between 0 and 1")
- scale * sqrt(-2 * log1p(-p))
+ ans = scale * sqrt(-2 * log1p(-p))
+ ans[scale <= 0] = NaN
+ ans
}
rrayleigh = function(n, scale = 1) {
- if (any(scale <= 0))
- stop("argument 'scale' must be positive")
- scale * sqrt(-2 * log(runif(n)))
+ ans = scale * sqrt(-2 * log(runif(n)))
+ ans[scale <= 0] = NaN
+ ans
}
@@ -7811,8 +8137,10 @@ rrayleigh = function(n, scale = 1) {
rayleigh = function(lscale = "loge", escale = list(), nrfs = 1 / 3 + 0.01) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
+
if (!is.list(escale)) escale = list()
- if (!is.Numeric(nrfs, allow = 1) || nrfs<0 || nrfs > 1)
+
+ if (!is.Numeric(nrfs, allowable.length = 1) || nrfs<0 || nrfs > 1)
stop("bad input for 'nrfs'")
new("vglmff",
@@ -7877,11 +8205,11 @@ dparetoIV = function(x, location = 0, scale = 1, inequality = 1, shape = 1, log
N = max(length(x), length(location), length(scale), length(inequality),
length(shape))
- x = rep(x, len = N); location = rep(location, len = N)
- scale = rep(scale, len = N); inequality = rep(inequality, len = N)
- shape = rep(shape, len = N)
+ x = rep(x, length.out = N); location = rep(location, length.out = N)
+ scale = rep(scale, length.out = N); inequality = rep(inequality, length.out = N)
+ shape = rep(shape, length.out = N)
- logdensity = rep(log(0), len = N)
+ logdensity = rep(log(0), length.out = N)
xok = (x > location)
zedd = (x - location) / scale
logdensity[xok] = log(shape[xok]) - log(scale[xok]) - log(inequality[xok])+
@@ -7890,96 +8218,108 @@ dparetoIV = function(x, location = 0, scale = 1, inequality = 1, shape = 1, log
if (log.arg) logdensity else exp(logdensity)
}
-pparetoIV = function(q, location = 0, scale = 1, inequality = 1, shape = 1) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi = TRUE))
- stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit = TRUE))
- stop("bad input for argument 'shape'")
- N = max(length(q), length(location), length(scale),
- length(inequality), length(shape))
- q = rep(q, len = N); location = rep(location, len = N)
- scale = rep(scale, len = N); inequality = rep(inequality, len = N)
- shape = rep(shape, len = N)
- answer = q * 0
- ii = q > location
- zedd = (q[ii] - location[ii]) / scale[ii]
- answer[ii] = 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
- answer
-}
-
-qparetoIV = function(p, location = 0, scale = 1, inequality = 1, shape = 1) {
- if (!is.Numeric(p, posit = TRUE) || any(p >= 1))
- stop("bad input for argument 'p'")
- if (!is.Numeric(scale, posit = TRUE))
- stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi = TRUE))
- stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit = TRUE))
- stop("bad input for argument 'shape'")
- location + scale * (-1 + (1-p)^(-1/shape))^inequality
+pparetoIV =
+ function(q, location = 0, scale = 1, inequality = 1, shape = 1) {
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+ if (!is.Numeric(scale, positive = TRUE))
+ stop("bad input for argument 'scale'")
+ if (!is.Numeric(inequality, positive = TRUE))
+ stop("bad input for argument 'inequality'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+
+ N = max(length(q), length(location), length(scale),
+ length(inequality), length(shape))
+ q = rep(q, length.out = N); location = rep(location, length.out = N)
+ scale = rep(scale, length.out = N); inequality = rep(inequality, length.out = N)
+ shape = rep(shape, length.out = N)
+ answer = q * 0
+ ii = q > location
+ zedd = (q[ii] - location[ii]) / scale[ii]
+ answer[ii] = 1 - (1 + zedd^(1/inequality[ii]))^(-shape[ii])
+ answer
+}
+
+
+qparetoIV =
+ function(p, location = 0, scale = 1, inequality = 1, shape = 1) {
+ if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(inequality, positive = TRUE))
+ stop("bad input for argument 'inequality'")
+ if (!is.Numeric(shape, positive = TRUE))
+ stop("bad input for argument 'shape'")
+ ans = location + scale * (-1 + (1-p)^(-1/shape))^inequality
+ ans[scale <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
-rparetoIV = function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument n")
- if (!is.Numeric(scale, posit = TRUE)) stop("bad input for argument 'scale'")
- if (!is.Numeric(inequality, posi = TRUE))
- stop("bad input for argument 'inequality'")
- if (!is.Numeric(shape, posit = TRUE)) stop("bad input for argument 'shape'")
- location + scale * (-1 + runif(n)^(-1/shape))^inequality
+
+rparetoIV =
+ function(n, location = 0, scale = 1, inequality = 1, shape = 1) {
+ if (!is.Numeric(inequality, positive = TRUE))
+ stop("bad input for argument 'inequality'")
+ ans = location + scale * (-1 + runif(n)^(-1/shape))^inequality
+ ans[scale <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
-dparetoIII = function(x, location = 0, scale = 1, inequality = 1, log = FALSE)
- dparetoIV(x = x, location=location, scale = scale, inequality=inequality,
- shape = 1, log = log)
+dparetoIII = function(x, location = 0, scale = 1, inequality = 1,
+ log = FALSE)
+ dparetoIV(x = x, location = location, scale = scale,
+ inequality = inequality, shape = 1, log = log)
pparetoIII = function(q, location = 0, scale = 1, inequality=1)
- pparetoIV(q=q, location=location, scale = scale, inequality=inequality,
- shape = 1)
+ pparetoIV(q = q, location = location, scale = scale,
+ inequality = inequality, shape = 1)
qparetoIII = function(p, location = 0, scale = 1, inequality=1)
- qparetoIV(p=p, location=location, scale = scale, inequality=inequality,
- shape = 1)
+ qparetoIV(p = p, location = location, scale = scale,
+ inequality = inequality, shape = 1)
rparetoIII = function(n, location = 0, scale = 1, inequality=1)
- rparetoIV(n = n, location=location, scale = scale, inequality=inequality,
- shape = 1)
+ rparetoIV(n = n, location= location, scale = scale,
+ inequality = inequality, shape = 1)
dparetoII = function(x, location = 0, scale = 1, shape = 1, log = FALSE)
- dparetoIV(x = x, location=location, scale = scale,
- inequality = 1, shape = shape,
- log = log)
+ dparetoIV(x = x, location = location, scale = scale,
+ inequality = 1, shape = shape,
+ log = log)
pparetoII = function(q, location = 0, scale = 1, shape = 1)
- pparetoIV(q=q, location=location, scale = scale,
- inequality = 1, shape = shape)
+ pparetoIV(q = q, location = location, scale = scale,
+ inequality = 1, shape = shape)
qparetoII = function(p, location = 0, scale = 1, shape = 1)
- qparetoIV(p=p, location=location, scale = scale,
- inequality = 1, shape = shape)
+ qparetoIV(p = p, location = location, scale = scale,
+ inequality = 1, shape = shape)
rparetoII = function(n, location = 0, scale = 1, shape = 1)
- rparetoIV(n = n, location=location, scale = scale,
- inequality = 1, shape = shape)
+ rparetoIV(n = n, location = location, scale = scale,
+ inequality = 1, shape = shape)
dparetoI = function(x, scale = 1, shape = 1)
- dparetoIV(x = x, location=scale, scale = scale, inequality = 1, shape = shape)
+ dparetoIV(x = x, location = scale, scale = scale, inequality = 1,
+ shape = shape)
pparetoI = function(q, scale = 1, shape = 1)
- pparetoIV(q=q, location=scale, scale = scale, inequality = 1, shape = shape)
+ pparetoIV(q = q, location = scale, scale = scale, inequality = 1,
+ shape = shape)
qparetoI = function(p, scale = 1, shape = 1)
- qparetoIV(p=p, location=scale, scale = scale, inequality = 1, shape = shape)
+ qparetoIV(p = p, location = scale, scale = scale, inequality = 1,
+ shape = shape)
rparetoI = function(n, scale = 1, shape = 1)
- rparetoIV(n = n, location=scale, scale = scale, inequality = 1, shape = shape)
+ rparetoIV(n = n, location = scale, scale = scale, inequality = 1,
+ shape = shape)
@@ -7996,6 +8336,7 @@ rparetoI = function(n, scale = 1, shape = 1)
linequality = as.character(substitute(linequality))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
+
if (!is.Numeric(location))
stop("argument 'location' must be numeric")
if (is.Numeric(iscale) && any(iscale <= 0))
@@ -8004,10 +8345,12 @@ rparetoI = function(n, scale = 1, shape = 1)
stop("argument 'iinequality' must be positive")
if (is.Numeric(ishape) && any(ishape <= 0))
stop("argument 'ishape' must be positive")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE) || imethod>2)
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE) || imethod>2)
stop("bad input for argument 'imethod'")
+
if (linequality == "nloge" && location != 0)
warning("The Burr distribution has 'location = 0' and 'linequality=nloge'")
+
if (!is.list(escale)) escale = list()
if (!is.list(einequality)) einequality = list()
if (!is.list(eshape)) eshape = list()
@@ -8047,9 +8390,9 @@ rparetoI = function(n, scale = 1, shape = 1)
shape.init = max(0.01, (2*A2-A1)/(A1-A2))
}
etastart=cbind(
- theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
- theta2eta(rep(inequality.init, len = n), .linequality, earg = .einequality),
- theta2eta(rep(shape.init, len = n), .lshape, earg = .eshape))
+ theta2eta(rep(scale.init, length.out = n), .lscale, earg = .escale),
+ theta2eta(rep(inequality.init, length.out = n), .linequality, earg = .einequality),
+ theta2eta(rep(shape.init, length.out = n), .lshape, earg = .eshape))
}
}), list( .location = location, .lscale = lscale,
.linequality = linequality, .lshape = lshape, .imethod = imethod,
@@ -8057,9 +8400,9 @@ rparetoI = function(n, scale = 1, shape = 1)
.iscale = iscale, .iinequality=iinequality, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
- shape = eta2theta(eta[,3], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
location + Scale * NA
}, list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
.escale = escale, .einequality = einequality, .eshape = eshape ))),
@@ -8074,13 +8417,13 @@ rparetoI = function(n, scale = 1, shape = 1)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
- shape = eta2theta(eta[,3], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoIV(x = y, location=location, scale=Scale,
+ sum(w * dparetoIV(x = y, location = location, scale=Scale,
inequality=inequality, shape = shape, log = TRUE))
}
}, list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
@@ -8088,9 +8431,9 @@ rparetoI = function(n, scale = 1, shape = 1)
vfamily = c("paretoIV"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
- shape = eta2theta(eta[,3], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
+ shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
zedd = (y - location) / Scale
temp100 = 1 + zedd^(1/inequality)
dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
@@ -8138,12 +8481,14 @@ rparetoI = function(n, scale = 1, shape = 1)
lscale = as.character(substitute(lscale))
if (mode(linequality) != "character" && mode(linequality) != "name")
linequality = as.character(substitute(linequality))
+
if (!is.Numeric(location))
stop("argument 'location' must be numeric")
if (is.Numeric(iscale) && any(iscale <= 0))
stop("argument 'iscale' must be positive")
if (is.Numeric(iinequality) && any(iinequality <= 0))
stop("argument 'iinequality' must be positive")
+
if (!is.list(escale)) escale = list()
if (!is.list(einequality)) einequality = list()
@@ -8171,15 +8516,15 @@ rparetoI = function(n, scale = 1, shape = 1)
if (!length(inequality.init) || !length(scale.init)) {
probs = (1:4)/5
ytemp = quantile(x=log(y-location), probs=probs)
- fittemp = lsfit(x=logit(probs), y = ytemp, int = TRUE)
+ fittemp = lsfit(x=logit(probs), y = ytemp, intercept = TRUE)
if (!length(inequality.init))
inequality.init = max(fittemp$coef["X"], 0.01)
if (!length(scale.init))
scale.init = exp(fittemp$coef["Intercept"])
}
etastart=cbind(
- theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
- theta2eta(rep(inequality.init, len = n), .linequality,
+ theta2eta(rep(scale.init, length.out = n), .lscale, earg = .escale),
+ theta2eta(rep(inequality.init, length.out = n), .linequality,
earg = .einequality))
}
}), list( .location = location, .lscale = lscale, .linequality = linequality,
@@ -8187,8 +8532,8 @@ rparetoI = function(n, scale = 1, shape = 1)
.iscale = iscale, .iinequality=iinequality ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
location + Scale * NA
}, list( .lscale = lscale, .linequality = linequality,
.escale = escale, .einequality = einequality ))),
@@ -8201,12 +8546,12 @@ rparetoI = function(n, scale = 1, shape = 1)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoIII(x = y, location=location, scale=Scale,
+ sum(w * dparetoIII(x = y, location = location, scale=Scale,
inequality=inequality, log = TRUE))
}
}, list( .lscale = lscale, .linequality = linequality,
@@ -8214,8 +8559,8 @@ rparetoI = function(n, scale = 1, shape = 1)
vfamily = c("paretoIII"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- inequality = eta2theta(eta[,2], .linequality, earg = .einequality)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
shape = 1
zedd = (y - location) / Scale
temp100 = 1 + zedd^(1/inequality)
@@ -8252,12 +8597,14 @@ rparetoI = function(n, scale = 1, shape = 1)
lscale = as.character(substitute(lscale))
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
+
if (!is.Numeric(location))
stop("argument 'location' must be numeric")
if (is.Numeric(iscale) && any(iscale <= 0))
stop("argument 'iscale' must be positive")
if (is.Numeric(ishape) && any(ishape <= 0))
stop("argument 'ishape' must be positive")
+
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
@@ -8285,23 +8632,23 @@ rparetoI = function(n, scale = 1, shape = 1)
probs = (1:4)/5
scale.init.0 = 1
ytemp = quantile(x=log(y-location+scale.init.0), probs=probs)
- fittemp = lsfit(x=log1p(-probs), y = ytemp, int = TRUE)
+ fittemp = lsfit(x=log1p(-probs), y = ytemp, intercept = TRUE)
if (!length(shape.init))
shape.init = max(-1/fittemp$coef["X"], 0.01)
if (!length(scale.init))
scale.init = exp(fittemp$coef["Intercept"])
}
etastart=cbind(
- theta2eta(rep(scale.init, len = n), .lscale, earg = .escale),
- theta2eta(rep(shape.init, len = n), .lshape, earg = .eshape))
+ theta2eta(rep(scale.init, length.out = n), .lscale, earg = .escale),
+ theta2eta(rep(shape.init, length.out = n), .lshape, earg = .eshape))
}
}), list( .location = location, .lscale = lscale,
.escale = escale, .eshape = eshape,
.lshape = lshape, .iscale = iscale, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
location + Scale * NA
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
@@ -8314,12 +8661,12 @@ rparetoI = function(n, scale = 1, shape = 1)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoII(x = y, location=location, scale=Scale,
+ sum(w * dparetoII(x = y, location = location, scale=Scale,
shape = shape, log = TRUE))
}
}, list( .lscale = lscale, .lshape = lshape,
@@ -8327,8 +8674,8 @@ rparetoI = function(n, scale = 1, shape = 1)
vfamily = c("paretoII"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[,1], .lscale, earg = .escale)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
zedd = (y - location) / Scale
temp100 = 1 + zedd
dl.dscale = (shape - (1+shape) / temp100) / (1 * Scale)
@@ -8357,41 +8704,50 @@ rparetoI = function(n, scale = 1, shape = 1)
dpareto = function(x, location, shape, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- L = max(length(x), length(location), length(shape))
- x = rep(x, len = L); location = rep(location, len = L); shape = rep(shape, len = L)
+ L = max(length(x), length(location), length(shape))
+ x = rep(x, length.out = L); location = rep(location, length.out = L); shape = rep(shape, length.out = L)
- logdensity = rep(log(0), len = L)
- xok = (x > location)
- logdensity[xok] = log(shape[xok]) + shape[xok] * log(location[xok]) -
+ logdensity = rep(log(0), length.out = L)
+ xok = (x > location)
+ logdensity[xok] = log(shape[xok]) + shape[xok] * log(location[xok]) -
(shape[xok]+1) * log(x[xok])
- if (log.arg) logdensity else exp(logdensity)
+ if (log.arg) logdensity else exp(logdensity)
}
+
ppareto = function(q, location, shape) {
- if (any(location <= 0)) stop("argument 'location' must be positive")
- if (any(shape <= 0)) stop("argument 'shape' must be positive")
- L = max(length(q), length(location), length(shape))
- q = rep(q, len = L); location = rep(location, len = L); shape = rep(shape, len = L)
- ifelse(q > location, 1 - (location/q)^shape, 0)
+
+ L = max(length(q), length(location), length(shape))
+ q = rep(q, length.out = L); location = rep(location, length.out = L);
+ shape = rep(shape, length.out = L)
+
+ ans = ifelse(q > location, 1 - (location/q)^shape, 0)
+ ans[location <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
+
qpareto = function(p, location, shape) {
- if (any(location <= 0)) stop("argument 'location' must be positive")
- if (any(shape <= 0)) stop("argument 'shape' must be positive")
- if (any(p <= 0) || any(p >= 1)) stop("argument 'p' must be between 0 and 1")
- location / (1 - p)^(1/shape)
+ if (any(p <= 0) || any(p >= 1))
+ stop("argument 'p' must be between 0 and 1")
+
+ ans = location / (1 - p)^(1/shape)
+ ans[location <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
+
rpareto = function(n, location, shape) {
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("bad input for argument 'n'")
- if (any(location <= 0)) stop("argument 'location' must be positive")
- if (any(shape <= 0)) stop("argument 'shape' must be positive")
- location / runif(n)^(1/shape)
+ ans = location / runif(n)^(1/shape)
+ ans[location <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
@@ -8477,19 +8833,19 @@ dtpareto = function(x, lower, upper, shape, log = FALSE) {
if (!is.Numeric(x))
stop("bad input for argument 'x'")
- if (!is.Numeric(lower, pos = TRUE))
+ if (!is.Numeric(lower, positive = TRUE))
stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos = TRUE))
+ if (!is.Numeric(upper, positive = TRUE))
stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos = TRUE))
+ if (!is.Numeric(shape, positive = TRUE))
stop("argument 'shape' must be positive")
L = max(length(x), length(lower), length(upper), length(shape))
- x = rep(x, len = L); shape = rep(shape, len = L)
- lower = rep(lower, len = L); upper = rep(upper, len = L);
+ x = rep(x, length.out = L); shape = rep(shape, length.out = L)
+ lower = rep(lower, length.out = L); upper = rep(upper, length.out = L);
- logdensity <- rep(log(0), len = L)
+ logdensity <- rep(log(0), length.out = L)
xok <- (0 < lower) & (lower < x) & (x < upper) & (shape > 0)
logdensity[xok] <- log(shape[xok]) + shape[xok] * log(lower[xok]) -
@@ -8504,56 +8860,50 @@ dtpareto = function(x, lower, upper, shape, log = FALSE) {
ptpareto = function(q, lower, upper, shape) {
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(lower, pos = TRUE))
- stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos = TRUE))
- stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos = TRUE))
- stop("argument 'shape' must be positive")
-
- L = max(length(q), length(lower), length(upper), length(shape))
- q = rep(q, len = L); lower = rep(lower, len = L);
- upper = rep(upper, len = L); shape = rep(shape, len = L)
-
- ans = q * 0
- xok <- (0 < lower) & (lower < q) & (q < upper) & (shape > 0)
- ans[xok] = (1 - (lower[xok]/q[xok])^shape[xok]) / (1 -
- (lower[xok]/upper[xok])^shape[xok])
- ans[q >= upper] = 1
- ans[upper < lower] <- NaN
- ans
+ if (!is.Numeric(q))
+ stop("bad input for argument 'q'")
+
+ L = max(length(q), length(lower), length(upper), length(shape))
+ q = rep(q, length.out = L); lower = rep(lower, length.out = L);
+ upper = rep(upper, length.out = L); shape = rep(shape, length.out = L)
+
+ ans = q * 0
+ xok <- (0 < lower) & (lower < q) & (q < upper) & (shape > 0)
+ ans[xok] = (1 - (lower[xok]/q[xok])^shape[xok]) / (1 -
+ (lower[xok]/upper[xok])^shape[xok])
+ ans[q >= upper] = 1
+
+ ans[upper < lower] <- NaN
+ ans[lower <= 0] = NaN
+ ans[upper <= 0] = NaN
+ ans[shape <= 0] = NaN
+
+ ans
}
qtpareto = function(p, lower, upper, shape) {
- if (!is.Numeric(p, posit = TRUE))
- stop("bad input for argument 'p'")
- if (!is.Numeric(lower, pos = TRUE))
- stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos = TRUE))
- stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos = TRUE))
- stop("argument 'shape' must be positive")
- if (max(p) >= 1)
- stop("argument 'p' must be in (0, 1)")
- if (min(upper - lower, na.rm = TRUE) < 0)
- stop("argument 'upper' must be greater than 'lower' values")
-
- lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
+ if (!is.Numeric(p, positive = TRUE))
+ stop("bad input for argument 'p'")
+ if (max(p) >= 1)
+ stop("argument 'p' must be in (0, 1)")
+
+ ans = lower / (1 - p*(1-(lower/upper)^shape))^(1/shape)
+ ans[lower <= 0] = NaN
+ ans[upper <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans[upper < lower] <- NaN
+ ans
}
rtpareto = function(n, lower, upper, shape) {
- if (!is.Numeric(lower, pos = TRUE))
- stop("argument 'lower' must be positive")
- if (!is.Numeric(upper, pos = TRUE))
- stop("argument 'upper' must be positive")
- if (!is.Numeric(shape, pos = TRUE))
- stop("argument 'shape' must be positive")
- qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
+ ans = qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
+ ans[lower <= 0] = NaN
+ ans[upper <= 0] = NaN
+ ans[shape <= 0] = NaN
+ ans
}
@@ -8564,17 +8914,17 @@ rtpareto = function(n, lower, upper, shape) {
if (mode(lshape) != "character" && mode(lshape) != "name")
lshape = as.character(substitute(lshape))
- if (!is.Numeric(lower, posit = TRUE, allow = 1))
+ if (!is.Numeric(lower, positive = TRUE, allowable.length = 1))
stop("bad input for argument 'lower'")
- if (!is.Numeric(upper, posit = TRUE, allow = 1))
+ if (!is.Numeric(upper, positive = TRUE, allowable.length = 1))
stop("bad input for argument 'upper'")
if (lower >= upper)
stop("lower < upper is required")
- if (length(ishape) && !is.Numeric(ishape, posit = TRUE))
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
@@ -8614,7 +8964,7 @@ rtpareto = function(n, lower, upper, shape) {
shape.grid = 2^((-4):4)
try.this = getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
y = y, x = x, w = w)
- try.this = rep(try.this, len = n)
+ try.this = rep(try.this, length.out = n)
try.this
}
etastart = theta2eta(shape.init, .lshape, earg = .earg)
@@ -8675,7 +9025,7 @@ erf = function(x)
2 * pnorm(x * sqrt(2)) - 1
erfc = function(x)
- 2 * pnorm(x * sqrt(2), lower = FALSE)
+ 2 * pnorm(x * sqrt(2), lower.tail = FALSE)
@@ -8702,7 +9052,7 @@ erfc = function(x)
if (!length(etastart)) {
initlambda = if (length( .init.lambda)) .init.lambda else
1 / (0.01 + (y-1)^2)
- initlambda = rep(initlambda, len = n)
+ initlambda = rep(initlambda, length.out = n)
etastart = cbind(theta2eta(initlambda, link=.link.lambda, earg = .earg))
}
}), list( .link.lambda = link.lambda, .earg = earg,
@@ -8745,15 +9095,22 @@ erfc = function(x)
lshape = as.character(substitute(lshape))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(tolerance, posit = TRUE, allow = 1) || tolerance>1.0e-2)
- stop("bad input for argument 'tolerance'")
- if (!is.Numeric(ishape, posit = TRUE))
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
+ tolerance > 1.0e-2)
+ stop("bad input for argument 'tolerance'")
+ if (!is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
+
ishape[ishape == 1] = 1.1 # Fails in @deriv
+
if (!is.list(escale)) escale = list()
if (!is.list(eshape)) eshape = list()
@@ -8773,20 +9130,20 @@ erfc = function(x)
c(namesof("shape", .lshape, earg = .eshape, short = TRUE),
namesof("scale", .lscale, earg = .escale, short = TRUE))
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, posit = TRUE))
+ shape.init = if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
- rep( .ishape, len = n)
- scale.init = if (length( .iscale)) rep( .iscale, len = n) else
+ rep( .ishape, length.out = n)
+ scale.init = if (length( .iscale)) rep( .iscale, length.out = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
- scale.init = rep(weighted.mean(scale.init, w = w), len = n)
+ scale.init = rep(weighted.mean(scale.init, w = w), length.out = n)
etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
theta2eta(scale.init, .lscale, earg = .escale))
}
}), list( .lshape = lshape, .lscale = lscale, .iscale = iscale, .ishape = ishape,
.eshape = eshape, .escale = escale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
(digamma(shape+1)-digamma(1)) / scale
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
@@ -8798,8 +9155,8 @@ erfc = function(x)
.eshape = eshape, .escale = escale ))),
loglikelihood= eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
sum(w * (log(shape) + log(scale) +
@@ -8808,8 +9165,8 @@ erfc = function(x)
.eshape = eshape, .escale = escale ))),
vfamily = c("expexp"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg = .eshape)
- scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
+ scale = eta2theta(eta[, 2], .lscale, earg = .escale)
dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
dl.dshape = 1/shape + log1p(-exp(-scale*y))
dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
@@ -8820,7 +9177,7 @@ erfc = function(x)
.eshape = eshape, .escale = escale ))),
weight = eval(substitute(expression({
d11 = 1 / shape^2 # True for all shape
- d22 = d12 = rep(as.numeric(NA), len = n)
+ d22 = d12 = rep(as.numeric(NA), length.out = n)
index2 = abs(shape - 2) > .tolerance # index2 = shape != 1
largeno = 10000
if (any(index2)) {
@@ -8867,8 +9224,10 @@ erfc = function(x)
ishape = 1) {
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
+
if (!is.list(escale)) escale = list()
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
+
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
new("vglmff",
@@ -8881,18 +9240,23 @@ erfc = function(x)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
predictors.names = namesof("scale", .lscale, earg = .escale, short = TRUE)
- if (length(w) != n || !is.Numeric(w, integer = TRUE, posit = TRUE))
- stop("weights must be a vector of positive integers")
+
+ if (length(w) != n ||
+ !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
+ stop("argument 'weights' must be a vector of positive integers")
+
if (!intercept.only)
- stop("this family function only works for an intercept-only, i.e., y ~ 1")
+ stop("this family function only works for an ",
+ "intercept-only, i.e., y ~ 1")
extra$yvector = y
extra$sumw = sum(w)
extra$w = w
+
if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, posit = TRUE))
+ shape.init = if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
- rep( .ishape, len = n)
- scaleinit = if (length( .iscale)) rep( .iscale, len = n) else
+ rep( .ishape, length.out = n)
+ scaleinit = if (length( .iscale)) rep( .iscale, length.out = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
etastart = cbind(theta2eta(scaleinit, .lscale, earg = .escale))
}
@@ -8967,7 +9331,7 @@ betaffqn.control <- function(save.weight = TRUE, ...)
if (mode(link) != "character" && mode(link) != "name")
link = as.character(substitute(link))
- if (!is.Numeric(A, allow = 1) || !is.Numeric(B, allow = 1) || A >= B)
+ if (!is.Numeric(A, allowable.length = 1) || !is.Numeric(B, allowable.length = 1) || A >= B)
stop("A must be < B, and both must be of length one")
stdbeta = (A == 0 && B == 1) # stdbeta==T iff standard beta distribution
if (!is.list(earg)) earg = list()
@@ -8997,7 +9361,7 @@ betaffqn.control <- function(save.weight = TRUE, ...)
}
# For QN update below
- if (length(w) != n || !is.Numeric(w, posit = TRUE))
+ if (length(w) != n || !is.Numeric(w, positive = TRUE))
stop("weights must be a vector of positive weights")
if (!length(etastart)) {
@@ -9012,7 +9376,7 @@ betaffqn.control <- function(save.weight = TRUE, ...)
}), list( .link = link, .earg = earg, .i1=i1, .i2=i2, .trim=trim, .A = A, .B = B ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
shapes = eta2theta(eta, .link, earg = .earg)
- .A + ( .B-.A) * shapes[,1] / (shapes[,1] + shapes[,2])
+ .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
}, list( .link = link, .earg = earg, .A = A, .B = B ))),
last = eval(substitute(expression({
misc$link = c(shape1 = .link, shape2 = .link)
@@ -9024,14 +9388,14 @@ betaffqn.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
shapes = eta2theta(eta, .link, earg = .earg)
- temp = lbeta(shapes[,1], shapes[,2])
+ temp = lbeta(shapes[, 1], shapes[, 2])
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * ((shapes[,1]-1)*log(y-.A) + (shapes[,2]-1)*log( .B-y) - temp -
- (shapes[,1]+shapes[,2]-1)*log( .B-.A )))
+ sum(w * ((shapes[, 1]-1)*log(y-.A) + (shapes[, 2]-1)*log( .B-y) - temp -
+ (shapes[, 1]+shapes[, 2]-1)*log( .B-.A )))
}
}, list( .link = link, .earg = earg, .A = A, .B = B ))),
vfamily = "betaffqn",
@@ -9039,7 +9403,7 @@ betaffqn.control <- function(save.weight = TRUE, ...)
shapes = eta2theta(eta, .link, earg = .earg)
dshapes.deta = dtheta.deta(shapes, .link, earg = .earg)
dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[,1] + shapes[,2]) - log( .B - .A)
+ digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
if (iter == 1) {
etanew = eta
} else {
@@ -9077,12 +9441,18 @@ betaffqn.control <- function(save.weight = TRUE, ...)
llocation = as.character(substitute(llocation))
if (mode(lscale) != "character" && mode(lscale) != "name")
lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer = TRUE, posit = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, posit = TRUE))
- stop("bad input for argument 'iscale'")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
if (!is.list(elocation)) elocation = list()
if (!is.list(escale)) escale = list()
@@ -9113,11 +9483,11 @@ betaffqn.control <- function(save.weight = TRUE, ...)
location.init = median(rep(y, w))
scale.init = sqrt(3) * sum(w*(y-location.init)^2) / (sum(w)*pi)
}
- location.init = if (length( .ilocation)) rep( .ilocation, len = n) else
- rep(location.init, len = n)
+ location.init = if (length( .ilocation)) rep( .ilocation, length.out = n) else
+ rep(location.init, length.out = n)
if ( .llocation == "loge") location.init = abs(location.init) + 0.001
- scale.init = if (length( .iscale)) rep( .iscale, len = n) else
- rep(1, len = n)
+ scale.init = if (length( .iscale)) rep( .iscale, length.out = n) else
+ rep(1, length.out = n)
etastart = cbind(
theta2eta(location.init, .llocation, earg = .elocation),
theta2eta(scale.init, .lscale, earg = .escale))
@@ -9127,7 +9497,7 @@ betaffqn.control <- function(save.weight = TRUE, ...)
.llocation = llocation, .lscale = lscale,
.ilocation = ilocation, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocation, earg = .elocation)
+ eta2theta(eta[, 1], .llocation, earg = .elocation)
}, list( .llocation = llocation,
.elocation = elocation, .escale = escale ))),
last = eval(substitute(expression({
@@ -9137,8 +9507,8 @@ betaffqn.control <- function(save.weight = TRUE, ...)
.elocation = elocation, .escale = escale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ location = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
sum(w * dlogis(x = y, location = location,
@@ -9148,8 +9518,8 @@ betaffqn.control <- function(save.weight = TRUE, ...)
.elocation = elocation, .escale = escale ))),
vfamily = c("logistic2"),
deriv = eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg = .escale)
+ location = eta2theta(eta[, 1], .llocation, earg = .elocation)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
zedd = (y-location) / Scale
ezedd = exp(-zedd)
dl.dlocation = (1-ezedd) / ((1 + ezedd) * Scale)
@@ -9177,6 +9547,234 @@ betaffqn.control <- function(save.weight = TRUE, ...)
+ negbinomial.size = function(size = Inf,
+ lmu = "loge",
+ emu = list(),
+ imu = NULL,
+ quantile.probs = 0.75,
+ imethod = 1,
+ shrinkage.init = 0.95, zero = NULL)
+{
+
+
+
+
+ if (any(size <= 0))
+ stop("bad input for argument 'size'")
+ if (any(is.na(size)))
+ stop("bad input for argument 'size'")
+
+
+ if (mode(lmu) != "character" && mode(lmu) != "name")
+ lmu = as.character(substitute(lmu))
+
+
+ if (length(imu) && !is.Numeric(imu, positive = TRUE))
+ stop("bad input for argument 'imu'")
+
+ if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) || shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+
+ ans =
+ new("vglmff",
+
+ blurb = c("Negative-binomial distribution with size known\n\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), "\n",
+ "Mean: mu\n",
+ "Variance: mu * (1 + mu / size) for NB-2"),
+
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+ Musual <- 1
+
+ if (any(y < 0))
+ stop("negative values not allowed for the 'negbinomial' family")
+ if (any(round(y) != y))
+ stop("integer-values only allowed for the 'negbinomial' family")
+
+ y = as.matrix(y)
+ M = Musual * ncol(y)
+ NOS = ncoly = ncol(y) # Number of species
+ predictors.names =
+ c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
+ .lmu, earg = .emu, tag = FALSE))
+
+
+ if (is.numeric( .mu.init ))
+ MU.INIT <- matrix( .mu.init, nrow(y), ncol(y), byrow = TRUE)
+
+
+ if (!length(etastart)) {
+ mu.init = y
+ for(iii in 1:ncol(y)) {
+ use.this = if ( .imethod == 1) {
+ weighted.mean(y[, iii], w) + 1/16
+ } else if ( .imethod == 3) {
+ c(quantile(y[, iii], probs = .quantile.probs) + 1/16)
+ } else {
+ median(y[, iii]) + 1/16
+ }
+
+ if (is.numeric( .mu.init )) {
+ mu.init[, iii] = MU.INIT[, iii]
+ } else {
+ medabsres = median(abs(y[, iii] - use.this)) + 1/32
+ allowfun = function(z, maxtol=1) sign(z)*pmin(abs(z), maxtol)
+ mu.init[, iii] = use.this + (1 - .sinit) *
+ allowfun(y[, iii] - use.this, maxtol = medabsres)
+
+ mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
+ }
+ } # of for(iii)
+
+
+ kmat = matrix( .size , n, NOS, byrow = TRUE)
+
+ newemu = if ( .lmu == "nbcanlink") {
+ c(list(size = kmat), .emu)
+ } else {
+ .emu
+ }
+ etastart = cbind(theta2eta(mu.init , link = .lmu , earg = newemu ))
+ }
+ }), list( .lmu = lmu,
+ .emu = emu,
+ .mu.init = imu,
+ .size = size, .quantile.probs = quantile.probs,
+ .sinit = shrinkage.init,
+ .zero = zero, .imethod = imethod ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- 1
+ eta = cbind(eta)
+ NOS = ncol(eta) / Musual
+ n = nrow(eta)
+ kmat = matrix( .size , n, NOS, byrow = TRUE)
+
+
+ newemu = if ( .lmu == "nbcanlink") {
+ c(list(size = kmat), .emu)
+ } else {
+ .emu
+ }
+
+ eta2theta(eta, .lmu , earg = newemu)
+ }, list( .lmu = lmu,
+ .size = size,
+ .emu = emu ))),
+
+ last = eval(substitute(expression({
+
+ temp0303 = c(rep( .lmu, length = NOS))
+ names(temp0303) = c(if (NOS == 1) "mu" else
+ paste("mu", 1:NOS, sep = ""))
+ misc$link = temp0303 # Already named
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:NOS) {
+ misc$earg[[ii]] = newemu
+ }
+
+ misc$imethod = .imethod
+ misc$expected = TRUE
+ misc$shrinkage.init = .sinit
+ misc$size = kmat
+ }), list( .lmu = lmu,
+ .emu = emu,
+ .sinit = shrinkage.init,
+ .imethod = imethod ))),
+
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mu = cbind(mu)
+ y = cbind(y)
+ w = cbind(w)
+ eta = cbind(eta)
+ NOS = ncol(eta)
+ n = nrow(eta)
+ kmat = matrix( .size , n, NOS, byrow = TRUE)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ ind1 = is.finite(kmat)
+ ans1 = ans2 = 0
+ for (kk in 1:NOS) {
+ ind1 = is.finite(kmat[, kk])
+ ans1 = ans1 +
+ sum(w[ind1] * dnbinom(x = y[ind1, kk], mu = mu[ind1, kk],
+ size = kmat[ind1, kk], log = TRUE))
+ ans2 = ans2 +
+ sum(w[!ind1] * dpois(x = y[!ind1, kk], lambda = mu[!ind1, kk],
+ log = TRUE))
+ }
+
+ ans = ans1 + ans2
+ ans
+ }
+ }, list( .size = size ))),
+
+ vfamily = c("negbinomial.size"),
+
+ deriv = eval(substitute(expression({
+ eta = cbind(eta)
+ NOS = M = ncol(eta)
+ kmat = matrix( .size , n, M, byrow = TRUE)
+
+ newemu = if ( .lmu == "nbcanlink") {
+ c(list(size = kmat), .emu)
+ } else {
+ .emu
+ }
+
+ dl.dmu = y/mu - (y+kmat)/(kmat+mu)
+ dl.dmu[!is.finite(dl.dmu)] = (y/mu)[!is.finite(dl.dmu)] - 1
+
+ dmu.deta = dtheta.deta(mu, .lmu ,
+ earg = c(list(wrt.eta = 1), newemu)) # eta1
+
+ myderiv = c(w) * dl.dmu * dmu.deta
+ myderiv
+ }), list( .lmu = lmu,
+ .emu = emu,
+ .size = size ))),
+
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
+
+ ed2l.dmu2 = 1 / mu - 1 / (mu + kmat)
+ wz = dmu.deta^2 * ed2l.dmu2
+
+
+ c(w) * wz
+ }), list( .lmu = lmu ))))
+
+ ans
+}
+
+
+
+
+
+
diff --git a/R/family.vglm.R b/R/family.vglm.R
index 4f6fccc..df14a22 100644
--- a/R/family.vglm.R
+++ b/R/family.vglm.R
@@ -1,12 +1,16 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
+
+if (FALSE)
family.vglm <- function(object, ...)
object$vfamily
+
+if (FALSE)
print.vfamily <- function(x, ...)
{
f <- x$vfamily
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index ff67335..e3ac25b 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -8,1840 +8,2864 @@
-dzanegbin = function(x, p0, size, prob = NULL, munb = NULL, log = FALSE) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
-
- LLL = max(length(x), length(p0), length(prob), length(size))
- if (length(x) != LLL) x = rep(x, len = LLL)
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("'p0' must be in [0,1]")
- if (!is.Numeric(prob, posit = TRUE))
- stop("'prob' must be in [0,Inf)")
- if (!is.Numeric(size, posit = TRUE))
- stop("'size' must be in [0,Inf)")
- index0 = x == 0
-
- if (log.arg) {
- ans[ index0] = log(p0[index0])
- ans[!index0] = log1p(-p0[!index0]) +
- dposnegbin(x[!index0], prob = prob[!index0],
- size = size[!index0], log = TRUE)
- } else {
- ans[ index0] = p0[index0]
- ans[!index0] = (1-p0[!index0]) * dposnegbin(x[!index0],
- prob = prob[!index0], size = size[!index0])
- }
- ans
-}
-pzanegbin = function(q, p0, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- LLL = max(length(q), length(p0), length(prob), length(size))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
-
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("'p0' must be in [0,1]")
- ans[q > 0] = p0[q > 0] + (1-p0[q > 0]) *
- pposnegbin(q[q > 0], size = size[q > 0], prob = prob[q > 0])
- ans[q < 0] = 0
- ans[q == 0] = p0[q == 0]
- ans
-}
-qzanegbin = function(p, p0, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- LLL = max(length(p), length(p0), length(prob), length(size))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- ans = rep(0.0, len = LLL)
-
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be between 0 and 1 inclusive")
- ans = p
- ans[p <= p0] = 0
- ans[p > p0] = qposnegbin((p[p>p0]-p0[p>p0])/(1-p0[p>p0]), prob = prob[p>p0],
- size = size[p>p0])
- ans
-}
+dzanegbin = function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
+ log = FALSE) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
-rzanegbin = function(n, p0, size, prob = NULL, munb = NULL) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- ans = rposnegbin(n = use.n, prob = prob, size = size)
- if (length(p0) != use.n) p0 = rep(p0, len = use.n)
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be between 0 and 1 inclusive")
- ifelse(runif(use.n) < p0, 0, ans)
+ LLL = max(length(x), length(pobs0), length(prob), length(size))
+ if (length(x) != LLL) x = rep(x, len = LLL)
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL)
+ if (length(size) != LLL) size = rep(size, len = LLL);
+
+ ans = rep(0.0, len = LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ if (!is.Numeric(prob, positive = TRUE))
+ stop("argument 'prob' must be in [0,Inf)")
+ if (!is.Numeric(size, positive = TRUE))
+ stop("argument 'size' must be in [0,Inf)")
+ index0 = x == 0
+
+ if (log.arg) {
+ ans[ index0] = log(pobs0[index0])
+ ans[!index0] = log1p(-pobs0[!index0]) +
+ dposnegbin(x[!index0], prob = prob[!index0],
+ size = size[!index0], log = TRUE)
+ } else {
+ ans[ index0] = pobs0[index0]
+ ans[!index0] = (1-pobs0[!index0]) * dposnegbin(x[!index0],
+ prob = prob[!index0], size = size[!index0])
+ }
+ ans
}
+pzanegbin = function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
+ LLL = max(length(q), length(pobs0), length(prob), length(size))
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ ans = rep(0.0, len = LLL)
+
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ qindex = (q > 0)
+ ans[ qindex] = pobs0[qindex] + (1 - pobs0[qindex]) *
+ pposnegbin(q[qindex], size = size[qindex],
+ prob = prob[qindex])
+ ans[q < 0] = 0
+ ans[q == 0] = pobs0[q == 0]
+ ans
+}
-dzapois = function(x, lambda, p0=0, log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
- LLL = max(length(x), length(lambda), length(p0))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
- ans = rep(0.0, len = LLL)
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be in [0,1]")
- index0 = (x == 0)
+qzanegbin = function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size/(size + munb)
+ }
- if (log.arg) {
- ans[ index0] = log(p0[index0])
- ans[!index0] = log1p(-p0[!index0]) +
- dpospois(x[!index0], lambda[!index0], log = TRUE)
- } else {
- ans[ index0] = p0[index0]
- ans[!index0] = (1-p0[!index0]) * dpospois(x[!index0], lambda[!index0])
- }
- ans
+ LLL = max(length(p), length(pobs0), length(prob), length(size))
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ ans = rep(0.0, len = LLL)
+
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ans = p
+ ans[p <= pobs0] = 0
+ pindex = (p > pobs0)
+ ans[pindex] = qposnegbin((p[pindex] -
+ pobs0[pindex]) / (1 - pobs0[pindex]),
+ prob = prob[pindex],
+ size = size[pindex])
+ ans
}
+rzanegbin = function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
-pzapois = function(q, lambda, p0=0) {
- LLL = max(length(q), length(lambda), length(p0))
- if (length(q) != LLL) q = rep(q, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
- ans = rep(0.0, len = LLL)
-
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be in [0,1]")
- ans[q > 0] = p0[q > 0] + (1-p0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
- ans[q < 0] = 0
- ans[q == 0] = p0[q == 0]
- ans
-}
-
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
-qzapois = function(p, lambda, p0=0) {
- LLL = max(length(p), length(lambda), length(p0))
- if (length(p) != LLL) p = rep(p, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(p0) != LLL) p0 = rep(p0, len = LLL);
+ ans = rposnegbin(n = use.n, prob = prob, size = size)
+ if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be between 0 and 1 inclusive")
- ans = p
- ind4 = (p > p0)
- ans[!ind4] = 0
- ans[ ind4] = qpospois((p[ind4]-p0[ind4])/(1-p0[ind4]), lam=lambda[ind4])
- ans
+ ifelse(runif(use.n) < pobs0, 0, ans)
}
-rzapois = function(n, lambda, p0=0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
- ans = rpospois(use.n, lambda)
- if (length(p0) != use.n) p0 = rep(p0, len = use.n)
- if (!is.Numeric(p0) || any(p0 < 0) || any(p0 > 1))
- stop("argument 'p0' must be between 0 and 1 inclusive")
- ifelse(runif(use.n) < p0, 0, ans)
-}
+dzapois = function(x, lambda, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL = max(length(x), length(lambda), length(pobs0))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
-dzipois = function(x, lambda, phi = 0, log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
- LLL = max(length(x), length(lambda), length(phi))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
- if (length(phi) != LLL) phi = rep(phi, len = LLL);
- ans = rep(0.0, len = LLL)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
+ index0 = (x == 0)
- index0 = (x == 0)
- if (log.arg) {
- ans[ index0] = log(phi[ index0] + (1-phi[ index0]) *
- dpois(x[ index0], lambda[ index0]))
- ans[!index0] = log1p(-phi[!index0]) +
- dpois(x[!index0], lambda[!index0], log = TRUE)
- } else {
- ans[ index0] = phi[ index0] + (1-phi[ index0]) *
- dpois(x[ index0], lambda[ index0])
- ans[!index0] = (1-phi[!index0]) * dpois(x[!index0], lambda[!index0])
- }
- ans
+ if (log.arg) {
+ ans[ index0] = log(pobs0[index0])
+ ans[!index0] = log1p(-pobs0[!index0]) +
+ dpospois(x[!index0], lambda[!index0], log = TRUE)
+ } else {
+ ans[ index0] = pobs0[index0]
+ ans[!index0] = (1 - pobs0[!index0]) *
+ dpospois(x[!index0], lambda[!index0])
+ }
+ ans
}
-pzipois = function(q, lambda, phi = 0) {
- ans = ppois(q, lambda)
- LLL = max(length(phi), length(ans))
- if (length(phi) != LLL) phi = rep(phi, len = LLL);
- if (length(ans) != LLL) ans = rep(ans, len = LLL);
- ans = ifelse(q < 0, 0, phi + (1-phi) * ans)
- ans[phi < 0] = NaN
- ans[phi > 1] = NaN
- ans
-}
+pzapois = function(q, lambda, pobs0 = 0) {
+ LLL = max(length(q), length(lambda), length(pobs0))
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
-qzipois = function(p, lambda, phi = 0) {
- LLL = max(length(p), length(lambda), length(phi))
- ans = p = rep(p, len = LLL)
- lambda = rep(lambda, len = LLL)
- phi = rep(phi, len = LLL)
- ans[p<=phi] = 0
- ans[p>phi] = qpois((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), lam=lambda[p>phi])
- ans[phi < 0] = NaN
- ans[phi > 1] = NaN
- ans
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ ans[q > 0] = pobs0[q > 0] +
+ (1-pobs0[q > 0]) * ppospois(q[q > 0], lambda[q > 0])
+ ans[q < 0] = 0
+ ans[q == 0] = pobs0[q == 0]
+ ans
}
-rzipois = function(n, lambda, phi = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
- ans = rpois(use.n, lambda)
- phi = rep(phi, len=length(ans))
- ans = ifelse(runif(use.n) < phi, 0, ans)
- ans[phi < 0] = NaN
- ans[phi > 1] = NaN
- ans
+qzapois = function(p, lambda, pobs0 = 0) {
+ LLL = max(length(p), length(lambda), length(pobs0))
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ans = p
+ ind4 = (p > pobs0)
+ ans[!ind4] = 0
+ ans[ ind4] = qpospois((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ lambda = lambda[ind4])
+ ans
}
- yip88 = function(link.lambda = "loge", n.arg = NULL)
-{
- if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
- link.lambda = as.character(substitute(link.lambda))
-
- new("vglmff",
- blurb = c("Zero-inflated Poisson (based on Yip (1988))\n\n",
- "Link: ", namesof("lambda", link.lambda), "\n",
- "Variance: (1-phi)*lambda"),
- first=eval(substitute(expression({
- zero <- y == 0
- if (any(zero)) {
- if (length(extra)) extra$sumw = sum(w) else
- extra = list(sumw=sum(w))
- if (is.numeric(.n.arg) && extra$sumw != .n.arg)
- stop("value of 'n.arg' conflicts with data ",
- "(it need not be specified anyway)")
- warning("trimming out the zero observations")
-
- axa.save = attr(x, "assign")
- x = x[!zero,, drop = FALSE]
- attr(x, "assign") = axa.save # Don't lose these!!
- w = w[!zero]
- y = y[!zero]
- } else
- if (!is.numeric(.n.arg))
- stop("n.arg must be supplied")
-
- }), list( .n.arg = n.arg ))),
- initialize = eval(substitute(expression({
- narg = if (is.numeric(.n.arg)) .n.arg else extra$sumw
- if (sum(w) > narg)
- stop("sum(w) > narg")
-
- predictors.names = namesof("lambda", .link.lambda, tag = FALSE)
- if (!length(etastart)) {
- lambda.init = rep(median(y), length=length(y))
- etastart = theta2eta(lambda.init, .link.lambda)
- }
- if (length(extra)) {
- extra$sumw = sum(w)
- extra$narg = narg # For @linkinv
- } else
- extra = list(sumw=sum(w), narg = narg)
- }), list( .link.lambda = link.lambda, .n.arg = n.arg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta, .link.lambda)
- temp5 = exp(-lambda)
- phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
- if (any(phi <= 0))
- stop("non-positive value(s) of phi")
- (1-phi) * lambda
- }, list( .link.lambda = link.lambda ))),
- last = eval(substitute(expression({
- misc$link = c(lambda = .link.lambda)
-
- if (ncol(x) == 1 && dimnames(x)[[2]]=="(Intercept)") {
- suma = extra$sumw
- phi = (1 - temp5[1] - suma/narg) / (1 - temp5[1])
- phi = if (phi < 0 || phi>1) NA else phi # phi is a probability
- misc$phi = phi
- }
- }), list( .link.lambda = link.lambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta, .link.lambda)
- temp5 = exp(-lambda)
- phi = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzipois(x = y, phi = phi, lambda = lambda, log = TRUE))
- }
- }, list( .link.lambda = link.lambda ))),
- vfamily = c("yip88"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta, .link.lambda)
- temp5 = exp(-lambda)
- dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
- dlambda.deta = dtheta.deta(lambda, .link.lambda)
- w * dl.dlambda * dlambda.deta
- }), list( .link.lambda = link.lambda ))),
- weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(lambda, .link.lambda)
- d2l.dlambda2 = -y / lambda^2 + temp5 / (1-temp5)^2
- -w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
- }), list( .link.lambda = link.lambda ))))
-}
+rzapois = function(n, lambda, pobs0 = 0) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ ans = rpospois(use.n, lambda)
+ if (length(pobs0) != use.n)
+ pobs0 = rep(pobs0, length = use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ifelse(runif(use.n) < pobs0, 0, ans)
+}
- zapoisson = function(lp0 = "logit", llambda = "loge",
- ep0 = list(), elambda = list(), zero = NULL) {
- if (mode(lp0) != "character" && mode(lp0) != "name")
- lp0 = as.character(substitute(lp0))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (!is.list(ep0)) ep0 = list()
- if (!is.list(elambda)) elambda = list()
-
- new("vglmff",
- blurb = c("Zero-altered Poisson ",
- "(binomial and positive-Poisson conditional model)\n\n",
- "Links: ",
- namesof("p0", lp0, earg = ep0, tag = FALSE), ", ",
- namesof("lambda", llambda, earg = elambda, tag = FALSE), "\n"),
-
- constraints = eval(substitute(expression({
-
- dotzero <- .zero
- Musual <- 2
- eval(negzero.expression)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- Musual <- 2
- y = as.matrix(y)
- if (any(y != round(y )))
- stop("the response must be integer-valued")
- if (any(y < 0))
- stop("the response must not have negative values")
-
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
-
- mynames1 = if (ncoly == 1) "p0" else
- paste("p0", 1:ncoly, sep = "")
- mynames2 = if (ncoly == 1) "lambda" else
- paste("lambda", 1:ncoly, sep = "")
- predictors.names =
- c(namesof(mynames1, .lp0, earg = .ep0, tag = FALSE),
- namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(Musual*NOS, M = Musual)]
-
- if (!length(etastart)) {
- etastart = cbind(theta2eta((0.5+w*y0)/(1+w), .lp0, earg = .ep0 ),
- matrix(1, n, NOS)) # 1 here is any old value
- for(spp. in 1:NOS) {
- sthese = skip.these[, spp.]
- etastart[!sthese, NOS+spp.] = theta2eta(
- y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
- .llambda, earg = .elambda )
- }
- etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
- }
- }), list( .lp0 = lp0, .llambda = llambda,
- .ep0 = ep0, .elambda = elambda ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS = extra$NOS
-
-
- p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
- .lp0, earg = .ep0))
- lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
- .llambda, earg = .elambda ))
-
- (1 - p0) * lambda / (-expm1(-lambda))
- }, list( .lp0 = lp0, .llambda = llambda,
- .ep0 = ep0, .elambda = elambda ))),
- last = eval(substitute(expression({
- temp.names = c(rep( .lp0, len = NOS),
- rep( .llambda, len = NOS))
- temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
- misc$link = temp.names
- misc$earg = vector("list", 2 * NOS)
- names(misc$link) <-
- names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
- for(ii in 1:NOS) {
- misc$earg[[2*ii-1]] = .ep0
- misc$earg[[2*ii ]] = .elambda
- }
- }), list( .lp0 = lp0, .llambda = llambda,
- .ep0 = ep0, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
-
- p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
- .lp0, earg = .ep0))
- lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
- .llambda, earg = .elambda ))
-
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzapois(x = y, p0 = p0, lambda = lambda, log = TRUE))
- }
- }, list( .lp0 = lp0, .llambda = llambda,
- .ep0 = ep0, .elambda = elambda ))),
- vfamily = c("zapoisson"),
- deriv = eval(substitute(expression({
- NOS = extra$NOS
- y0 = extra$y0
- skip = extra$skip.these
-
- p0 = cbind(eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE],
- .lp0, earg = .ep0))
- lambda = cbind(eta2theta(eta[, 2*(1:NOS)-0, drop = FALSE],
- .llambda, earg = .elambda ))
-
-
- dl.dlambda = y / lambda - 1 - 1 / expm1(lambda)
- for(spp. in 1:NOS)
- dl.dlambda[skip[, spp.], spp.] = 0
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- mup0 = p0
- temp3 = if (.lp0 == "logit") {
- w * (y0 - mup0)
- } else
- w * dtheta.deta(mup0, link = .lp0, earg = .ep0) *
- (y0 / mup0 - 1) / (1 - mup0)
- ans <- cbind(temp3,
- w * dl.dlambda * dlambda.deta)
- ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
- ans
- }), list( .lp0 = lp0, .llambda = llambda,
- .ep0 = ep0, .elambda = elambda ))),
- weight = eval(substitute(expression({
- wz = matrix( 10 * .Machine$double.eps^(3/4), n, 2*NOS)
- for(spp. in 1:NOS) {
- sthese = skip[, spp.]
- temp5 = expm1(lambda[!sthese, spp.])
- ed2l.dlambda2 = -(temp5 + 1) * (1 / lambda[!sthese, spp.] -
- 1 / temp5) / temp5
- wz[!sthese, NOS+spp.] = -w[!sthese] * ed2l.dlambda2 *
- (dlambda.deta[!sthese, spp.]^2)
- }
- tmp100 = mup0 * (1.0 - mup0)
- tmp200 = if ( .lp0 == "logit") {
- cbind(w * tmp100)
- } else {
- cbind(w * dtheta.deta(mup0, link= .lp0, earg = .ep0)^2 / tmp100)
- }
- for(ii in 1:NOS) {
- index200 = abs(tmp200[, ii]) < .Machine$double.eps
- if (any(index200)) {
- tmp200[index200, ii] = 10.0 * .Machine$double.eps^(3/4)
- }
- }
- wz[, 1:NOS] = tmp200
- wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
- wz
- }), list( .lp0 = lp0, .ep0 = ep0 ))))
-} # End of zapoisson
+dzipois = function(x, lambda, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+ LLL = max(length(x), length(lambda), length(pstr0))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ ans = x + lambda + pstr0
- zanegbinomial =
- function(lp0 = "logit", lmunb = "loge", lsize = "loge",
- ep0 = list(), emunb = list(), esize = list(),
- ipnb0 = NULL, isize = NULL, zero = -3,
- cutoff = 0.995, imethod = 1,
- shrinkage.init = 0.95)
-{
+ index0 = (x == 0)
+ if (log.arg) {
+ ans[ index0] = log(pstr0[ index0] + (1 - pstr0[ index0]) *
+ dpois(x[ index0], lambda[ index0]))
+ ans[!index0] = log1p(-pstr0[!index0]) +
+ dpois(x[!index0], lambda[!index0], log = TRUE)
+ } else {
+ ans[ index0] = pstr0[ index0] + (1 - pstr0[ index0]) *
+ dpois(x[ index0], lambda[ index0])
+ ans[!index0] = (1 - pstr0[!index0]) * dpois(x[!index0], lambda[!index0])
+ }
- if (!is.Numeric(cutoff, positiv = TRUE, allow = 1) ||
- cutoff < 0.8 || cutoff >= 1)
- stop("range error in the argument 'cutoff'")
- if (length(ipnb0) && (!is.Numeric(ipnb0, positiv = TRUE) ||
- max(ipnb0) >= 1))
- stop("If given, 'ipnb0' must contain values in (0,1) only")
- if (length(isize) && !is.Numeric(isize, positiv = TRUE))
- stop("If given, 'isize' must contain positive values only")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
-
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (mode(lp0) != "character" && mode(lp0) != "name")
- lp0 = as.character(substitute(lp0))
- if (!is.list(ep0)) ep0 = list()
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
-
- new("vglmff",
- blurb = c("Zero-altered negative binomial (binomial and\n",
- "positive-negative binomial conditional model)\n\n",
- "Links: ",
- namesof("p0", lp0, earg = ep0, tag = FALSE), ", ",
- namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
- namesof("size", lsize, earg = esize, tag = FALSE), "\n",
- "Mean: (1-p0) * munb / [1 - (size/(size+munb))^size]"),
- constraints = eval(substitute(expression({
-
- dotzero <- .zero
- Musual <- 3
- eval(negzero.expression)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- Musual <- 3
- y = as.matrix(y)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- M = Musual * ncoly #
- if (any(y != round(y)))
- stop("the response must be integer-valued")
- if (any(y < 0))
- stop("the response must not have negative values")
-
- mynames1 = if (NOS == 1) "p0" else paste("p0", 1:NOS, sep = "")
- mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- predictors.names =
- c(namesof(mynames1, .lp0, earg = .ep0, tag = FALSE),
- namesof(mynames2, .lmunb, earg = .emunb, tag = FALSE),
- namesof(mynames3, .lsize, earg = .esize, tag = FALSE))
- predictors.names =
- predictors.names[interleave.VGAM(Musual*NOS, M = Musual)]
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
-
- if (!length(etastart)) {
- mu.init = y
- for(iii in 1:ncol(y)) {
- index.posy = (y[,iii] > 0)
- use.this = if ( .imethod == 2) {
- weighted.mean(y[index.posy,iii], w[index.posy])
- } else {
- median(rep(y[index.posy,iii], w[index.posy])) + 1/2
- }
- mu.init[ index.posy,iii] = (1- .sinit) * y[index.posy,iii] +
- .sinit * use.this
- mu.init[!index.posy,iii] = use.this
- max.use.this = 7 * use.this + 10
- vecTF = (mu.init[,iii] > max.use.this)
- if (any(vecTF))
- mu.init[vecTF,iii] = max.use.this
- }
-
-
- pnb0 = matrix(if(length( .ipnb0)) .ipnb0 else -1,
- nr=n, nc=NOS, byrow = TRUE)
- for(spp. in 1:NOS) {
- if (any(pnb0[,spp.] < 0)) {
- index.y0 = y[,spp.] < 0.5
- pnb0[,spp.] = max(min(sum(index.y0)/n, 0.97), 0.03)
- }
- }
-
-
- if ( is.Numeric( .isize )) {
- kmat0 = matrix( .isize, nr=n, nc=ncoly, byrow = TRUE)
- } else {
- posnegbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
- munb = extraargs
- sum(w * dposnegbin(x = y, munb = munb, size = kmat, log = TRUE))
- }
- k.grid = 2^((-6):6)
- kmat0 = matrix(0, nr=n, nc=NOS)
- for(spp. in 1:NOS) {
- index.posy = y[,spp.] > 0
- posy = y[index.posy, spp.]
- kmat0[,spp.] = getMaxMin(k.grid,
- objfun = posnegbinomial.Loglikfun,
- y = posy, x = x[index.posy,],
- w = w[index.posy],
- extraargs = mu.init[index.posy, spp.])
- }
- }
-
- etastart = cbind(theta2eta(pnb0, .lp0, earg = .ep0 ),
- theta2eta(mu.init, .lmunb, earg = .emunb),
- theta2eta(kmat0, .lsize, earg = .esize ))
- etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
- }
- }), list( .lp0 = lp0, .lmunb = lmunb, .lsize = lsize,
- .ep0 = ep0, .emunb = emunb, .esize = esize,
- .ipnb0 = ipnb0, .isize = isize,
- .imethod = imethod, .sinit = shrinkage.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS <- extra$NOS
- p0 <- eta2theta(eta[,3*(1:NOS)-2],
- .lp0, earg = .ep0 )
- munb <- eta2theta(eta[,3*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat <- eta2theta(eta[,3*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
- (1 - p0) * munb / (1 - pnb0)
- }, list( .lp0 = lp0, .lsize = lsize, .lmunb = lmunb,
- .ep0 = ep0, .emunb = emunb, .esize = esize ))),
- last = eval(substitute(expression({
- misc$link = c(rep( .lp0, length = NOS),
- rep( .lmunb, length = NOS),
- rep( .lsize, length = NOS))
- temp.names = c(mynames1, mynames2, mynames3)
- temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
- names(misc$link) = temp.names
- misc$earg = vector("list", Musual*NOS)
- names(misc$earg) = temp.names
- for(ii in 1:NOS) {
- misc$earg[[3*ii-2]] = .ep0
- misc$earg[[3*ii-1]] = .emunb
- misc$earg[[3*ii ]] = .esize
- }
- misc$cutoff = .cutoff
- misc$imethod = .imethod
- }), list( .lp0 = lp0, .lmunb = lmunb, .lsize = lsize,
- .ep0 = ep0, .emunb = emunb, .esize = esize,
- .cutoff = cutoff,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
- p0 = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE], .lp0, earg = .ep0 )
- munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lsize, earg = .esize )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzanegbin(x = y, p0 = p0, munb = munb, size = kmat, log = TRUE))
- }
- }, list( .lp0 = lp0, .lmunb = lmunb, .lsize = lsize,
- .ep0 = ep0, .emunb = emunb, .esize = esize ))),
- vfamily = c("zanegbinomial"),
- deriv = eval(substitute(expression({
- Musual <- 3
- NOS = extra$NOS
- y0 = extra$y0
- p0 = eta2theta(eta[,3*(1:NOS)-2], .lp0, earg = .ep0 )
- munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lsize, earg = .esize )
- skip = extra$skip.these
-
- d3 = deriv3(~ -log(1 - (kmat. /(kmat. + munb. ))^kmat. ),
- c("munb.", "kmat."), hessian= TRUE) # Extra term
- dl0.dthetas = array(NA, c(n, NOS, 2))
- d2l0.dthetas2 = array(NA, c(n, NOS, 3)) # matrix-band format
- for(spp. in 1:NOS) {
- kmat. = kmat[,spp.]
- munb. = munb[,spp.]
- eval.d3 = eval(d3) # Evaluated for one species
- dl0.dthetas[,spp.,1] = attr(eval.d3, "gradient")[, 1]
- dl0.dthetas[,spp.,2] = attr(eval.d3, "gradient")[, 2]
- d2l0.dthetas2[,spp.,1] = attr(eval.d3, "hessian")[,1,1]
- d2l0.dthetas2[,spp.,2] = attr(eval.d3, "hessian")[,2,2]
- d2l0.dthetas2[,spp.,3] = attr(eval.d3, "hessian")[,1,2]
- }
- dl.dmunb = y/munb - (y+kmat)/(kmat+munb) + dl0.dthetas[,,1]
- dl.dk = digamma(y+kmat) - digamma(kmat) - (y+kmat)/(munb+kmat) + 1 +
- log(kmat/(kmat+munb)) + dl0.dthetas[,,2]
- for(spp. in 1:NOS)
- dl.dk[skip[,spp.],spp.] = dl.dmunb[skip[,spp.],spp.] = 0
-
- dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
- dk.deta = dtheta.deta(kmat, .lsize, earg = .esize )
- myderiv = c(w) * cbind(dl.dmunb * dmunb.deta,
- dl.dk * dk.deta)
-
- mup0 = p0
- temp3 = if ( .lp0 == "logit") {
- w * (y0 - mup0)
- } else
- w * dtheta.deta(mup0, link= .lp0, earg = .ep0 ) *
- (y0/mup0 - 1) / (1-mup0)
-
- ans = cbind(temp3, myderiv)
- ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
- ans
- }), list( .lp0 = lp0, .lmunb = lmunb, .lsize = lsize,
- .ep0 = ep0, .emunb = emunb, .esize = esize ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, 6*NOS-1) # wz is not 'diagonal'
- pnb0 = (kmat / (kmat + munb))^kmat
- ed2l.dmunb2 = (1/munb - (munb + kmat*(1-pnb0))/(munb +
- kmat)^2) / (1-pnb0) - d2l0.dthetas2[,,1]
- wz[,3*(1:NOS)-1] = w * dmunb.deta^2 * ed2l.dmunb2
- wz[,3*NOS+3*(1:NOS)-1] = -w * d2l0.dthetas2[,,3] * dmunb.deta * dk.deta
-
-
-
- fred = dotFortran(name="enbin8",
- ans=double(n*NOS), as.double(kmat),
- as.double(kmat/(munb+kmat)), as.double(.cutoff),
- as.integer(n), ok=as.integer(1), as.integer(NOS),
- sumpdf=double(1), macheps=as.double(.Machine$double.eps))
- if (fred$ok != 1)
- stop("error in Fortran subroutine exnbin")
- dim(fred$ans) = c(n, NOS)
- ed2l.dk2 = -fred$ans/(1-pnb0) - 1/kmat + 1/(kmat+munb) -
- munb * pnb0 / ((1-pnb0)*(munb+kmat)^2) - d2l0.dthetas2[,,2]
- wz[,3*(1:NOS)] = w * dk.deta^2 * ed2l.dk2
-
-
-
-
- tmp100 = mup0*(1-mup0)
- tmp200 = if (.lp0 == "logit") {
- cbind(w * tmp100)
- } else {
- cbind(w * dtheta.deta(mup0, link= .lp0, earg = .ep0 )^2 / tmp100)
- }
- for(ii in 1:NOS) {
- index200 = abs(tmp200[,ii]) < .Machine$double.eps
- if (any(index200)) {
- tmp200[index200,ii] = .Machine$double.eps # Diagonal 0's are bad
- }
- }
- wz[,3*(1:NOS)-2] = tmp200
- for(spp. in 1:NOS) {
- wz[skip[,spp.],3*spp. - 1] =
- wz[skip[,spp.],3*spp.] = sqrt(.Machine$double.eps)
- wz[skip[,spp.],3*NOS+3*(spp.)-1] = 0
- }
+ deflat_limit = -1 / expm1(lambda)
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
- wz
- }), list( .lp0 = lp0, .ep0 = ep0, .cutoff = cutoff ))))
+ ans
}
+pzipois = function(q, lambda, pstr0 = 0) {
+ LLL = max(length(pstr0), length(lambda), length(q))
+ if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+ if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
+ if (length(q) != LLL) q = rep(q, len = LLL);
- if (FALSE)
-rposnegbin = function(n, munb, k) {
- if (!is.Numeric(k, posit = TRUE))
- stop("argument 'k' must be positive")
- if (!is.Numeric(munb, posit = TRUE))
- stop("argument 'munb' must be positive")
- if (!is.Numeric(n, posit = TRUE, integ = TRUE, allow = 1))
- stop("argument 'n' must be a positive integer")
- ans = rnbinom(n=n, mu=munb, size=k)
- munb = rep(munb, len=n)
- k = rep(k, len=n)
- index = ans == 0
- while(any(index)) {
- more = rnbinom(n=sum(index), mu=munb[index], size=k[index])
- ans[index] = more
- index = ans == 0
- }
- ans
-}
+ ans = ppois(q, lambda)
+ ans = ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+ deflat_limit = -1 / expm1(lambda)
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
- if (FALSE)
-dposnegbin = function(x, munb, k, log = FALSE) {
- if (!is.Numeric(k, posit = TRUE))
- stop("argument 'k' must be positive")
- if (!is.Numeric(munb, posit = TRUE))
- stop("argument 'munb' must be positive")
- ans = dnbinom(x = x, mu=munb, size=k, log=log)
- ans0 = dnbinom(x=0, mu=munb, size=k, log = FALSE)
- ans = if (log) ans - log1p(-ans0) else ans/(1-ans0)
- ans[x == 0] = if (log) -Inf else 0
- ans
+
+ ans
}
+qzipois = function(p, lambda, pstr0 = 0) {
+ LLL = max(length(p), length(lambda), length(pstr0))
+ ans =
+ p = rep(p, len = LLL)
+ lambda = rep(lambda, len = LLL)
+ pstr0 = rep(pstr0, len = LLL)
+ ans[p <= pstr0] = 0
+ pindex = (p > pstr0)
+ ans[pindex] = qpois((p[pindex] - pstr0[pindex]) / (1 - pstr0[pindex]),
+ lambda = lambda[pindex])
- zipoisson = function(lphi = "logit", llambda = "loge",
- ephi = list(), elambda = list(),
- iphi = NULL, ilambda = NULL, imethod = 1,
- shrinkage.init = 0.8, zero = NULL)
-{
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (is.Numeric(iphi))
- if (!is.Numeric(iphi, posit = TRUE) || any(iphi >= 1))
- stop("'iphi' values must be inside the interval (0,1)")
- if (is.Numeric(ilambda))
- if (!is.Numeric(ilambda, posit = TRUE))
- stop("'ilambda' values must be positive")
- if (!is.list(ephi)) ephi = list()
- if (!is.list(elambda)) elambda = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
+ deflat_limit = -1 / expm1(lambda)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * exp(-lambda[ind0])
+ ans[p[ind0] <= pobs0] = 0
+ pindex = (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * exp(-lambda[pindex])
+ ans[pindex] = qpospois((p[pindex] - Pobs0) / (1 - Pobs0),
+ lambda = lambda[pindex])
+ }
- new("vglmff",
- blurb = c("Zero-inflated Poisson\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: (1-phi)*lambda"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(as.matrix(y)) != 1)
- stop("multivariate responses not allowed")
- if (any(round(y) != y))
- stop("integer-valued responses only allowed for ",
- "the 'zipoisson' family")
- predictors.names = c(
- namesof("phi", .lphi, earg = .ephi, tag = FALSE),
- namesof("lambda", .llambda, earg = .ephi, tag = FALSE))
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
- if (!length(etastart)) {
- phi.init = if (length( .iphi )) .iphi else {
- 0.5 * sum(w[y == 0]) / sum(w)
- }
- phi.init[phi.init <= 0.02] = 0.02 # Last resort
- phi.init[phi.init >= 0.98] = 0.98 # Last resort
+ ans[p < 0] = NaN
+ ans[p > 1] = NaN
+ ans
+}
- if ( length( .ilambda )) {
- lambda.init = rep( .ilambda, len = n)
- } else if ( length(mustart)) {
- lambda.init = mustart / (1 - phi.init)
- } else if ( .imethod == 2) {
- mymean = weighted.mean(y[y > 0], w[y > 0]) + 1/16
- lambda.init = (1 - .sinit) * (y + 1/8) + .sinit * mymean
- } else {
- use.this = median(y[y > 0]) + 1 / 16
- lambda.init = (1 - .sinit) * (y + 1/8) + .sinit * use.this
- }
+rzipois = function(n, lambda, pstr0 = 0) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- zipois.Loglikfun = function(phival, y, x, w, extraargs) {
- sum(w * dzipois(x = y, phi = phival,
- lambda = extraargs$lambda,
- log = TRUE))
- }
- phi.grid = seq(0.02, 0.98, len = 21)
- init.phi = getMaxMin(phi.grid,
- objfun = zipois.Loglikfun,
- y = y, x = x, w = w,
- extraargs = list(lambda = lambda.init))
- phi.init = if (length( .iphi )) .iphi else init.phi
- if (length(mustart)) {
- lambda.init = lambda.init / (1 - phi.init)
- }
+ if (length(pstr0) != use.n) pstr0 = rep(pstr0, len = use.n);
+ if (length(lambda) != use.n) lambda = rep(lambda, len = use.n);
+
+ ans = rpois(use.n, lambda)
+ ans = ifelse(runif(use.n) < pstr0, 0, ans)
- etastart = cbind(theta2eta(rep(phi.init, len = n), .lphi, .ephi ),
- theta2eta(lambda.init, .llambda, .elambda ))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .lphi = lphi, .llambda = llambda,
- .ephi = ephi, .elambda = elambda,
- .iphi = iphi, .ilambda = ilambda,
- .imethod = imethod, .sinit = shrinkage.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- phivec = eta2theta(eta[, 1], .lphi, earg = .ephi )
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
- (1 - phivec) * lambda
- }, list( .lphi = lphi, .llambda = llambda,
- .ephi = ephi, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$link <- c("phi" = .lphi, "lambda" = .llambda)
- misc$earg <- list("phi" = .ephi, "lambda" = .elambda)
- if (intercept.only) {
- phi = eta2theta(eta[1, 1], .lphi, earg = .ephi )
- lambda = eta2theta(eta[1, 2], .llambda, earg = .elambda )
- misc$prob0 = phi + (1 - phi) * exp(-lambda) # P(Y=0)
- }
- }), list( .lphi = lphi, .llambda = llambda,
- .ephi = ephi, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- smallno = 100 * .Machine$double.eps
- phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
- phi = pmax(phi, smallno)
- phi = pmin(phi, 1.0-smallno)
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzipois(x = y, phi = phi, lambda = lambda, log = TRUE))
- }
- }, list( .lphi = lphi, .llambda = llambda,
- .ephi = ephi, .elambda = elambda ))),
- vfamily = c("zipoisson"),
- deriv = eval(substitute(expression({
- smallno = 100 * .Machine$double.eps
- phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
- phi = pmax(phi, smallno)
- phi = pmin(phi, 1.0-smallno)
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda )
- tmp8 = phi + (1-phi)*exp(-lambda)
- index0 = (y == 0)
- dl.dphi = -expm1(-lambda) / tmp8
- dl.dphi[!index0] = -1 / (1-phi[!index0])
- dl.dlambda = -(1-phi) * exp(-lambda) / tmp8
- dl.dlambda[!index0] = (y[!index0] - lambda[!index0]) / lambda[!index0]
- dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda )
- ans = c(w) * cbind(dl.dphi * dphi.deta,
- dl.dlambda * dlambda.deta)
- if (.llambda == "loge" && (any(lambda[!index0] < .Machine$double.eps))) {
- ans[!index0,2] = w[!index0] * (y[!index0] - lambda[!index0])
- }
- ans
- }), list( .lphi = lphi, .llambda = llambda,
- .ephi = ephi, .elambda = elambda ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
- d2l.dphi2 = -expm1(-lambda) / ((1-phi)*tmp8)
- d2l.dlambda2 = (1-phi)/lambda - phi*(1-phi)*exp(-lambda) / tmp8
- d2l.dphilambda = -exp(-lambda) / tmp8
- wz[, iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
- wz[, iam(2,2,M)] = d2l.dlambda2 * dlambda.deta^2
- wz[, iam(1,2,M)] = d2l.dphilambda * dphi.deta * dlambda.deta
- if (.llambda == "loge" && (any(lambda[!index0] < .Machine$double.eps))) {
- ind5 = !index0 & (lambda < .Machine$double.eps)
- if (any(ind5))
- wz[ind5,iam(2,2,M)] = (1-phi[ind5]) * .Machine$double.eps
- }
- c(w) * wz
- }), list( .llambda = llambda ))))
-}
+ prob0 = exp(-lambda)
+ deflat_limit = -1 / expm1(lambda)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] = rpospois(sum(ind0), lambda[ind0])
+ ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ }
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+ ans
+}
- zibinomial = function(lphi = "logit", lmu = "logit",
- ephi = list(), emu = list(),
- iphi = NULL, zero = 1, mv = FALSE,
- imethod = 1)
-{
- if (as.logical(mv)) stop("argument 'mv' must be FALSE")
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (is.Numeric(iphi))
- if (!is.Numeric(iphi, posit = TRUE) || any(iphi >= 1))
- stop("'iphi' values must be inside the interval (0,1)")
- if (!is.list(ephi)) ephi = list()
- if (!is.list(emu)) emu = list()
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
-
- new("vglmff",
- blurb = c("Zero-inflated binomial\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi ), ", ",
- namesof("mu", lmu, earg = emu ), "\n",
- "Mean: (1-phi) * mu / (1 - (1-mu)^w)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w = w
-
-
- {
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
-
- if (NCOL(y) == 1) {
- if (is.factor(y)) y <- y != levels(y)[1]
- nn = rep(1, n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1.0 + w)
-
-
- no.successes = y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
-
- } else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
- if (!length(mustart) && !length(etastart))
- mustart = (0.5 + nvec * y) / (1 + nvec)
- } else {
- stop("for the binomialff family, response 'y' must be a ",
- "vector of 0 and 1's\n",
- "or a factor ",
- "(first level = fail, other levels = success),\n",
- "or a 2-column matrix where col 1 is the no. of ",
- "successes and col 2 is the no. of failures")
- }
- }
+ yip88 = function(link.lambda = "loge", n.arg = NULL) {
+ if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
+ link.lambda = as.character(substitute(link.lambda))
- predictors.names =
- c(namesof("phi", .lphi, earg = .ephi, tag = FALSE),
- namesof("mu", .lmu, earg = .emu, tag = FALSE))
+ new("vglmff",
+ blurb = c("Zero-inflated Poisson (based on Yip (1988))\n\n",
+ "Link: ", namesof("lambda", link.lambda), "\n",
+ "Variance: (1 - pstr0) * lambda"),
+ first = eval(substitute(expression({
+ zero <- y == 0
+ if (any(zero)) {
+ if (length(extra)) extra$sumw = sum(w) else
+ extra = list(sumw=sum(w))
+ if (is.numeric(.n.arg) && extra$sumw != .n.arg)
+ stop("value of 'n.arg' conflicts with data ",
+ "(it need not be specified anyway)")
+ warning("trimming out the zero observations")
+
+ axa.save = attr(x, "assign")
+ x = x[!zero,, drop = FALSE]
+ attr(x, "assign") = axa.save # Don't lose these!!
+ w = w[!zero]
+ y = y[!zero]
+ } else {
+ if (!is.numeric(.n.arg))
+ stop("n.arg must be supplied")
+ }
+
+ }), list( .n.arg = n.arg ))),
+ initialize = eval(substitute(expression({
+ narg = if (is.numeric(.n.arg)) .n.arg else extra$sumw
+ if (sum(w) > narg)
+ stop("sum(w) > narg")
- phi.init = if (length( .iphi )) .iphi else {
- prob0.est = sum(w[y == 0]) / sum(w)
- if ( .imethod == 1) {
- (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
- } else {
- prob0.est
- }
- }
+ predictors.names = namesof("lambda", .link.lambda, tag = FALSE)
+ if (!length(etastart)) {
+ lambda.init = rep(median(y), length = length(y))
+ etastart = theta2eta(lambda.init, .link.lambda)
+ }
+ if (length(extra)) {
+ extra$sumw = sum(w)
+ extra$narg = narg # For @linkinv
+ } else {
+ extra = list(sumw = sum(w), narg = narg)
+ }
+ }), list( .link.lambda = link.lambda, .n.arg = n.arg ))),
- phi.init[phi.init <= -0.10] = 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] = 0.05 # Last resort
- phi.init[phi.init >= 0.99] = 0.95 # Last resort
-
- if ( length(mustart) && !length(etastart))
- mustart = cbind(rep(phi.init, len = n),
- mustart) # 1st coln not a real mu
- }), list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu,
- .iphi = iphi,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
- mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
- (1-phi) * mubin
- }, list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu ))),
- last = eval(substitute(expression({
- misc$link = c("phi" = .lphi, "mu" = .lmu)
- misc$earg = list("phi" = .ephi, "mu" = .emu )
- misc$imethod = .imethod
-
-
- if (intercept.only && all(w == w[1])) {
- phi = eta2theta(eta[1,1], .lphi, earg = .ephi )
- mubin = eta2theta(eta[1,2], .lmu, earg = .emu )
- misc$p0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
- }
- }), list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu,
- .imethod = imethod ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- cbind(theta2eta(mu[, 1], .lphi, earg = .ephi ),
- theta2eta(mu[, 2], .lmu, earg = .emu ))
- }, list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
- mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(dzibinom(x = round(w * y), size = w, prob = mubin,
- log = TRUE, phi = phi))
- }
- }, list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu ))),
- vfamily = c("zibinomial"),
- deriv = eval(substitute(expression({
- phi = eta2theta(eta[, 1], .lphi, earg = .ephi )
- mubin = eta2theta(eta[, 2], .lmu, earg = .emu )
-
- prob0 = (1 - mubin)^w # Actually q^w
- tmp8 = phi + (1 - phi) * prob0
- index = (y == 0)
- dl.dphi = (1 - prob0) / tmp8
- dl.dphi[!index] = -1 / (1 - phi[!index])
- dl.dmubin = -w * (1 - phi) * (1 - mubin)^(w - 1) / tmp8
- dl.dmubin[!index] = w[!index] *
- (y[!index] / mubin[!index] -
- (1 - y[!index]) / (1 - mubin[!index]))
- dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi )
- dmubin.deta = dtheta.deta(mubin, .lmu, earg = .emu )
- ans = cbind(dl.dphi * dphi.deta,
- dl.dmubin * dmubin.deta)
- if ( .lmu == "logit") {
- ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
- }
- ans
- }), list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ lambda = eta2theta(eta, .link.lambda)
+ temp5 = exp(-lambda)
+ pstr0 = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
+ if (any(pstr0 <= 0))
+ stop("non-positive value(s) of pstr0")
+ (1-pstr0) * lambda
+ }, list( .link.lambda = link.lambda ))),
+ last = eval(substitute(expression({
+ misc$link = c(lambda = .link.lambda )
+ if (intercept.only) {
+ suma = extra$sumw
+ pstr0 = (1 - temp5[1] - suma / narg) / (1 - temp5[1])
+ pstr0 = if (pstr0 < 0 || pstr0 > 1) NA else pstr0
+ misc$pstr0 = pstr0
+ }
+ }), list( .link.lambda = link.lambda ))),
- d2l.dphi2 = (1 - prob0) / ((1 - phi) * tmp8)
+ loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE,
+ eta, extra = NULL) {
+ lambda = eta2theta(eta, .link.lambda)
+ temp5 = exp(-lambda)
+ pstr0 = (1 - temp5 - extra$sumw / extra$narg) / (1 - temp5)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE))
+ }
+ }, list( .link.lambda = link.lambda ))),
+ vfamily = c("yip88"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta, .link.lambda)
+ temp5 = exp(-lambda)
+ dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
+ dlambda.deta = dtheta.deta(lambda, .link.lambda)
+ w * dl.dlambda * dlambda.deta
+ }), list( .link.lambda = link.lambda ))),
+ weight = eval(substitute(expression({
+ d2lambda.deta2 = d2theta.deta2(lambda, .link.lambda)
+ d2l.dlambda2 = -y / lambda^2 + temp5 / (1 - temp5)^2
+ -w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
+ }), list( .link.lambda = link.lambda ))))
+}
- d2l.dphimubin = -w * (1 - mubin)^(w - 1) / tmp8
+ zapoisson = function(lpobs0 = "logit", llambda = "loge",
+ epobs0 = list(), elambda = list(), zero = NULL) {
- d2l.dmubin2 = w * (1 - phi) *
- (1 / (mubin * (1 - mubin)) -
- (tmp8 * (w - 1) * (1 - mubin)^(w - 2) -
- (1 - phi) * w * (1 - mubin)^(2*(w - 1))) / tmp8)
- wz[,iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
- wz[,iam(2,2,M)] = d2l.dmubin2 * dmubin.deta^2
- wz[,iam(1,2,M)] = d2l.dphimubin * dphi.deta * dmubin.deta
- if (TRUE) {
- ind6 = (wz[,iam(2,2,M)] < .Machine$double.eps)
- if (any(ind6))
- wz[ind6,iam(2,2,M)] = .Machine$double.eps
- }
- wz
- }), list( .lphi = lphi, .lmu = lmu,
- .ephi = ephi, .emu = emu ))))
-}
+ lpobs_0 = lpobs0
+ epobs_0 = epobs0
+ if (mode(lpobs_0) != "character" && mode(lpobs_0) != "name")
+ lpobs_0 = as.character(substitute(lpobs_0))
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda = as.character(substitute(llambda))
+ if (!is.list(epobs_0)) epobs_0 = list()
+ if (!is.list(elambda)) elambda = list()
-dzibinom = function(x, size, prob, log = FALSE, phi = 0) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
+ new("vglmff",
+ blurb = c("Zero-altered Poisson ",
+ "(Bernoulli and positive-Poisson conditional model)\n\n",
+ "Links: ",
+ namesof("pobs0", lpobs_0, earg = epobs_0, tag = FALSE), ", ",
+ namesof("lambda", llambda, earg = elambda, tag = FALSE), "\n",
+ "Mean: (1 - pobs0) * lambda / (1 - exp(-lambda))"),
- LLL = max(length(x), length(size), length(prob), length(phi))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(size) != LLL) size = rep(size, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(phi) != LLL) phi = rep(phi, len = LLL);
- ans = dbinom(x = x, size = size, prob = prob, log = TRUE)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- if (log.arg) {
- ifelse(x == 0, log(phi + (1-phi) * exp(ans)), log1p(-phi) + ans)
- } else {
- ifelse(x == 0, phi + (1-phi) * exp(ans) , (1-phi) * exp(ans))
- }
-}
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 2
+ y <- as.matrix(y)
+ if (any(y != round(y )))
+ stop("the response must be integer-valued")
+ if (any(y < 0))
+ stop("the response must not have negative values")
-pzibinom = function(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0) {
- ans = pbinom(q, size, prob, lower.tail = lower.tail, log.p = log.p)
- phi = rep(phi, length=length(ans))
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- phi + (1-phi) * ans
-}
+ extra$y0 = y0 = ifelse(y == 0, 1, 0)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+ mynames1 = if (ncoly == 1) "pobs0" else
+ paste("pobs0", 1:ncoly, sep = "")
+ mynames2 = if (ncoly == 1) "lambda" else
+ paste("lambda", 1:ncoly, sep = "")
+ predictors.names =
+ c(namesof(mynames1, .lpobs_0, earg = .epobs_0, tag = FALSE),
+ namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
+ interleave.VGAM(Musual*NOS, M = Musual)]
-qzibinom = function(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0) {
- nn = max(length(p), length(size), length(prob), length(phi))
- p = rep(p, len=nn)
- size = rep(size, len=nn)
- prob = rep(prob, len=nn)
- phi = rep(phi, len=nn)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- ans = p
- ans[p<=phi] = 0
- ans[p>phi] = qbinom((p[p>phi]-phi[p>phi])/(1-phi[p>phi]), size[p>phi],
- prob[p>phi], lower.tail = lower.tail, log.p = log.p)
- ans
-}
+ if (!length(etastart)) {
+ etastart =
+ cbind(theta2eta((0.5 + w*y0) / (1+w), .lpobs_0, earg = .epobs_0 ),
+ matrix(1, n, NOS)) # 1 here is any old value
+ for(spp. in 1:NOS) {
+ sthese = skip.these[, spp.]
+ etastart[!sthese, NOS+spp.] =
+ theta2eta(y[!sthese, spp.] / (-expm1(-y[!sthese, spp.])),
+ .llambda, earg = .elambda )
+ }
+ etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ }
+ }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
+ .epobs_0 = epobs_0, .elambda = elambda ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ NOS = extra$NOS
+ Musual <- 2
-rzibinom = function(n, size, prob, phi = 0) {
- if (!is.Numeric(n, positive = TRUE, integer = TRUE, allow = 1))
- stop("n must be a single positive integer")
- ans = rbinom(n, size, prob)
- phi = rep(phi, len=length(ans))
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- ifelse(runif(n) < phi, 0, ans)
-}
+ pobs_0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs_0, earg = .epobs_0 ))
+ lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+ (1 - pobs_0) * lambda / (-expm1(-lambda))
+ }, list( .lpobs_0 = lpobs_0, .llambda = llambda,
+ .epobs_0 = epobs_0, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ temp.names = c(rep( .lpobs_0 , len = NOS),
+ rep( .llambda , len = NOS))
+ temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$link = temp.names
+ misc$expected = TRUE
+ misc$earg = vector("list", Musual * NOS)
+
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] = .epobs_0
+ misc$earg[[Musual*ii ]] = .elambda
+ }
+ }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
+ .epobs_0 = epobs_0, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ NOS = extra$NOS
+ Musual <- 2
+ pobs0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs_0, earg = .epobs_0))
+ lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzapois(x = y, pobs0 = pobs0, lambda = lambda, log = TRUE))
+ }
+ }, list( .lpobs_0 = lpobs_0, .llambda = llambda,
+ .epobs_0 = epobs_0, .elambda = elambda ))),
+ vfamily = c("zapoisson"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ NOS = extra$NOS
+ y0 = extra$y0
+ skip = extra$skip.these
+ phimat = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs_0, earg = .epobs_0 ))
+ lambda = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .llambda, earg = .elambda ))
+ dl.dlambda = y / lambda + 1 / expm1(-lambda)
+ dl.dphimat = -1 / (1 - phimat) # For y > 0 obsns
+ for(spp. in 1:NOS) {
+ dl.dphimat[skip[, spp.], spp.] = 1 / phimat[skip[, spp.], spp.]
+ dl.dlambda[skip[, spp.], spp.] = 0
+ }
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
+ mu.phi0 = phimat
+ temp3 = if (.lpobs_0 == "logit") {
+ c(w) * (y0 - mu.phi0)
+ } else {
+ c(w) * dtheta.deta(mu.phi0, link = .lpobs_0 , earg = .epobs_0 ) *
+ dl.dphimat
+ }
+ ans <- cbind(temp3,
+ c(w) * dl.dlambda * dlambda.deta)
+ ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lpobs_0 = lpobs_0, .llambda = llambda,
+ .epobs_0 = epobs_0, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ wz = matrix(0.0, n, Musual*NOS)
-dzinegbin = function(x, phi, size, prob = NULL, munb = NULL, log = FALSE) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
- if (!is.logical(log.arg) || length(log.arg) != 1)
- stop("bad input for 'log.arg'")
- ans = dnbinom(x = x, size = size, prob = prob, log = log.arg)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- phi = rep(phi, length=length(ans))
- if (log.arg)
- ifelse(x == 0, log(phi+(1-phi)*exp(ans)), log1p(-phi) + ans) else
- ifelse(x == 0, phi+(1-phi)* ans, (1-phi) * ans)
-}
+ temp5 = expm1(lambda)
+ ed2l.dlambda2 = (1 - phimat) * (temp5 + 1) *
+ (1 / lambda - 1 / temp5) / temp5
+ wz[, NOS+(1:NOS)] = w * ed2l.dlambda2 * dlambda.deta^2
-pzinegbin = function(q, phi, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
+ tmp100 = mu.phi0 * (1.0 - mu.phi0)
+ tmp200 = if ( .lpobs_0 == "logit" && is.empty.list( .epobs_0 )) {
+ cbind(c(w) * tmp100)
+ } else {
+ cbind(c(w) * (1 / tmp100) *
+ dtheta.deta(mu.phi0, link = .lpobs_0, earg = .epobs_0)^2)
}
- ans = pnbinom(q=q, size = size, prob = prob)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- phi + (1-phi) * ans
-}
-qzinegbin = function(p, phi, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
+ if (FALSE)
+ for(ii in 1:NOS) {
+ index200 = abs(tmp200[, ii]) < .Machine$double.eps
+ if (any(index200)) {
+ tmp200[index200, ii] = 10.0 * .Machine$double.eps^(3/4)
+ }
}
- LLL = max(length(p), length(prob), length(phi), length(size))
- if (length(p) != LLL) p = rep(p, len = LLL)
- if (length(phi) != LLL) phi = rep(phi, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL);
-
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("argument 'phi' must be between 0 and 1 inclusive")
- ans = p
- ind4 = (p > phi)
- ans[!ind4] = 0
- ans[ ind4] = qnbinom(p = (p[ind4]-phi[ind4])/(1-phi[ind4]),
- size = size[ind4], prob = prob[ind4])
- ans
-}
-rzinegbin = function(n, phi, size, prob = NULL, munb = NULL) {
- if (length(munb)) {
- if (length(prob))
- stop("'prob' and 'munb' both specified")
- prob <- size/(size + munb)
- }
+ wz[, 1:NOS] = tmp200
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
- stop("bad input for argument 'n'") else n
+ wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
- ans = rnbinom(n = use.n, size = size, prob = prob)
- if (!is.Numeric(phi) || any(phi < 0) || any(phi > 1))
- stop("'argument phi' must be between 0 and 1 inclusive")
- phi = rep(phi, len=length(ans))
- ifelse(runif(use.n) < phi, rep(0, use.n), ans)
-}
+ wz
+ }), list( .lpobs_0 = lpobs_0,
+ .epobs_0 = epobs_0 ))))
+} # End of zapoisson
-zinegbinomial.control <- function(save.weight = TRUE, ...)
+
+zanegbinomial.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- zinegbinomial =
- function(lphi = "logit", lmunb = "loge", lsize = "loge",
- ephi = list(), emunb = list(), esize = list(),
- iphi = NULL, isize = NULL, zero = -3,
- imethod = 1, shrinkage.init = 0.95,
- nsimEIM = 200)
+ zanegbinomial =
+ function(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+ epobs0 = list(), emunb = list(), esize = list(),
+ ipobs0 = NULL, isize = NULL,
+ zero = c(-1, -3),
+ imethod = 1,
+ nsimEIM = 250,
+ shrinkage.init = 0.95)
{
- if (length(iphi) && (!is.Numeric(iphi, positiv = TRUE) ||
- any(iphi >= 1)))
- stop("'iphi' must contain values in (0,1)")
- if (length(isize) && !is.Numeric(isize, positiv = TRUE))
- stop("'isize' must contain positive values only")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 3) stop("argument 'imethod' must be 1, 2 or 3")
- if (!is.Numeric(nsimEIM, allow = 1, integ = TRUE))
- stop("'nsimEIM' must be a positive integer")
- if (nsimEIM <= 10)
- warning("'nsimEIM' should be greater than 10, say")
- if (!is.Numeric(shrinkage.init, allow = 1) || shrinkage.init < 0 ||
- shrinkage.init > 1) stop("bad input for argument 'shrinkage.init'")
-
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (!is.list(ephi)) ephi = list()
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
-
- new("vglmff",
- blurb = c("Zero-inflated negative binomial\n\n",
- "Links: ",
- namesof("phi", lphi , earg = ephi, tag = FALSE), ", ",
- namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
- namesof("size", lsize, earg = esize, tag = FALSE), "\n",
- "Mean: (1-phi) * munb"),
- constraints = eval(substitute(expression({
-
- dotzero <- .zero
- Musual <- 3
- eval(negzero.expression)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- Musual <- 3
- y = as.matrix(y)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
-
- mynames1 = if (NOS == 1) "phi" else paste("phi", 1:NOS, sep = "")
- mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
- mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- predictors.names =
- c(namesof(mynames1, .lphi, earg = .ephi, tag = FALSE),
- namesof(mynames2, .lmunb, earg = .emunb, tag = FALSE),
- namesof(mynames3, .lsize, earg = .esize, tag = FALSE))
- predictors.names =
- predictors.names[interleave.VGAM(Musual*NOS, M = Musual)]
- if (!length(etastart)) {
- mu.init = if ( .imethod == 3) {
- y + 1/16
- } else {
- mu.init = y
- for(iii in 1:ncol(y)) {
- index = (y[,iii] > 0)
- mu.init[,iii] = if ( .imethod == 2)
- weighted.mean(y[index,iii], w=w[index]) else
- median(rep(y[index,iii], w[index])) + 1/8
- }
- (1- .sinit) * (y+1/16) + .sinit * mu.init
- }
-
- phi.init = if (length( .iphi))
- matrix( .iphi, n, ncoly, byrow = TRUE) else {
- phi.init = y
- for(iii in 1:ncol(y))
- phi.init[,iii] = sum(w[y[,iii] == 0]) / sum(w)
- phi.init[phi.init <= 0.02] = 0.02 # Last resort
- phi.init[phi.init >= 0.98] = 0.98 # Last resort
- phi.init
- }
-
- kay.init =
- if ( is.Numeric( .isize )) {
- matrix( .isize, nr=n, nc=ncoly, byrow = TRUE)
- } else {
- zinegbin.Loglikfun = function(kval, y, x, w, extraargs) {
- index = (y == 0)
- phivec = extraargs$phi
- muvec = extraargs$mu
- tmp8 = phivec[index] + (1.0-phivec[index]) *
- dnbinom(y[index], mu= muvec[index], size=kval)
- ell0 = log(tmp8)
- ell1 = log1p(-phivec[!index]) + dnbinom(y[!index],
- mu= muvec[!index], size=kval, log = TRUE)
- sum(w[index] * ell0) + sum(w[!index] * ell1)
- }
- k.grid = 2^((-6):6)
- kay.init = matrix(0, nr=n, nc=NOS)
- for(spp. in 1:NOS) {
- kay.init[,spp.] = getMaxMin(k.grid,
- objfun = zinegbin.Loglikfun,
- y=y[,spp.], x = x, w = w,
- extraargs= list(phi = phi.init[,spp.],
- mu=mu.init[,spp.]))
- }
- kay.init
- }
-
- etastart = cbind(theta2eta(phi.init, .lphi, earg = .ephi),
- theta2eta(mu.init, .lmunb, earg = .emunb),
- theta2eta(kay.init, .lsize, earg = .esize))
- etastart =
- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
- }
- }), list( .lphi = lphi, .lmunb = lmunb, .lsize = lsize,
- .ephi = ephi, .emunb = emunb, .esize = esize,
- .iphi = iphi, .isize = isize,
- .sinit = shrinkage.init, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE],
- .lphi, earg = .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- fv.matrix = (1 - phi) * munb
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(phi)[[1]], extra$dimnamesy2)
- fv.matrix
- }, list( .lphi = lphi, .lsize = lsize, .lmunb = lmunb,
- .ephi = ephi, .esize = esize, .emunb = emunb ))),
- last = eval(substitute(expression({
- misc$link = c(rep( .lphi, length = NOS),
- rep( .lmunb, length = NOS),
- rep( .lsize, length = NOS))
- temp.names = c(mynames1, mynames2, mynames3)
- temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
- names(misc$link) = temp.names
- misc$earg = vector("list", 3*NOS)
- names(misc$earg) = temp.names
- for(ii in 1:NOS) {
- misc$earg[[3*ii-2]] = .ephi
- misc$earg[[3*ii-1]] = .emunb
- misc$earg[[3*ii ]] = .esize
- }
- misc$imethod = .imethod
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- misc$Musual = Musual
- if (intercept.only) {
- phi = eta2theta(eta[1,3*(1:NOS)-2], .lphi, earg = .ephi)
- munb = eta2theta(eta[1,3*(1:NOS)-1], .lmunb, earg = .emunb )
- kval = eta2theta(eta[1,3*(1:NOS)], .lsize, earg = .esize)
- misc$prob0 = phi + (1-phi) * (kval / (kval + munb))^kval # P(Y=0)
- }
- }), list( .lphi = lphi, .lmunb = lmunb, .lsize = lsize,
- .ephi = ephi, .emunb = emunb, .esize = esize,
- .nsimEIM = nsimEIM, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE], .lphi, earg = .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE], .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE], .lsize, earg = .esize )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzinegbin(x = y, phi = phi, munb = munb,
- size = kmat, log = TRUE))
- }
- }, list( .lphi = lphi, .lmunb = lmunb, .lsize = lsize,
- .ephi = ephi, .emunb = emunb, .esize = esize ))),
- vfamily = c("zinegbinomial"),
- deriv = eval(substitute(expression({
- Musual <- 3
- NOS = extra$NOS
- phi = eta2theta(eta[,3*(1:NOS)-2, drop = FALSE],
- .lphi, earg = .ephi )
- munb = eta2theta(eta[,3*(1:NOS)-1, drop = FALSE],
- .lmunb, earg = .emunb )
- kmat = eta2theta(eta[,3*(1:NOS), drop = FALSE],
- .lsize, earg = .esize )
- dphi.deta = dtheta.deta(phi, .lphi, earg = .ephi )
- dmunb.deta = dtheta.deta(munb, .lmunb, earg = .emunb )
- dk.deta = dtheta.deta(kmat, .lsize, earg = .esize )
- dthetas.detas =
- (cbind(dphi.deta,
- dmunb.deta,
- dk.deta))[, interleave.VGAM(Musual*NOS, M = Musual)]
-
- d3 = deriv3(~ log(phi. + (1 - phi.) * (kmat. /(kmat. + munb. ))^kmat.),
- c("phi.", "munb.", "kmat."), hessian = FALSE)
- dl.dthetas = matrix(0, n, M) # M = 3*NOS; for all species
- for(spp. in 1:NOS) {
- index = (y[,spp.] == 0)
- if (!sum(index) || !sum(!index))
- stop("must have some 0s AND some positive counts in the data")
-
- yvec. = y[index,spp.]
- kmat. = kmat[index,spp.]
- munb. = munb[index,spp.]
- phi. = phi[index,spp.]
- eval.d3 = eval(d3) # Evaluated for one species
- dl.dthetas[index,(3*spp.-2):(3*spp.)] = attr(eval.d3, "gradient")
-
- yvec. = y[!index,spp.]
- kmat. = kmat[!index,spp.]
- munb. = munb[!index,spp.]
- phi. = phi[!index,spp.]
- dl.dphi = -1/(1-phi.)
- dl.dmunb = yvec. / munb. - (yvec. +kmat.)/(kmat.+munb.)
- dl.dk = digamma(yvec. +kmat.) - digamma(kmat.) -
- (yvec. +kmat.)/(munb.+kmat.) + 1 +
- log(kmat./(kmat.+munb.))
- dl.dthetas[!index,(3*spp.-2):(3*spp.)] =
- cbind(dl.dphi, dl.dmunb, dl.dk)
- }
- w * dl.dthetas * dthetas.detas
- }), list( .lphi = lphi, .lmunb = lmunb, .lsize = lsize,
- .ephi = ephi, .emunb = emunb, .esize = esize ))),
- weight = eval(substitute(expression({
-
- wz = matrix(0, n, 3*(M-1))
- ind8 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- ind1 = iam(NA, NA, M = 3, both = TRUE, diag = TRUE)
- for(spp. in 1:NOS) {
- run.varcov = 0
- sdl.dthetas = matrix(0, n, 3)
- for(ii in 1:( .nsimEIM )) {
- ysim = rzinegbin(n=n, phi = phi[,spp.],
- size=kmat[,spp.], mu=munb[,spp.])
- index = (ysim == 0)
-
- yvec. = ysim[index]
- kmat. = kmat[index,spp.]
- munb. = munb[index,spp.]
- phi. = phi[index,spp.]
- eval.d3 = eval(d3) # Evaluated for one species
- sdl.dthetas[index,] = attr(eval.d3, "gradient")
-
- yvec. = ysim[!index]
- kmat. = kmat[!index,spp.]
- munb. = munb[!index,spp.]
- phi. = phi[!index,spp.]
- dl.dphi = -1/(1-phi.)
- dl.dmunb = yvec. / munb. - (yvec. +kmat.)/(kmat.+munb.)
- dl.dk = digamma(yvec. +kmat.) - digamma(kmat.) -
- (yvec. +kmat.)/(munb.+kmat.) + 1 +
- log(kmat./(kmat.+munb.))
- sdl.dthetas[!index,] = cbind(dl.dphi, dl.dmunb, dl.dk)
- temp3 = sdl.dthetas
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz1 = if (intercept.only)
- matrix(colMeans(run.varcov),
- nr=n, nc=ncol(run.varcov), byrow = TRUE) else run.varcov
-
- wz1 = wz1 * dthetas.detas[,3*(spp. -1) + ind1$row] *
- dthetas.detas[,3*(spp. -1) + ind1$col]
-
- for(jay in 1:3)
- for(kay in jay:3) {
- cptr = iam((spp.-1)*3+jay, (spp.-1)*3+kay, M = M)
- wz[,cptr] = wz1[,iam(jay, kay, M = 3)]
- }
- } # End of for(spp.) loop
- c(w) * wz
- }), list( .lphi = lphi, .ephi = ephi, .nsimEIM = nsimEIM ))))
-}
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE))
+ stop("argument 'nsimEIM' must be a positive integer")
+ if (nsimEIM <= 30)
+ warning("argument 'nsimEIM' should be greater than 30, say")
- zipoissonff <- function(llambda = "loge", lprobp = "logit",
- elambda = list(), eprobp = list(),
- ilambda = NULL, iprobp = NULL, imethod = 1,
- shrinkage.init = 0.8, zero = -2)
+ if (length(ipobs0) && (!is.Numeric(ipobs0, positive = TRUE) ||
+ max(ipobs0) >= 1))
+ stop("If given, argument 'ipobs0' must contain values in (0,1) only")
+ if (length(isize) && !is.Numeric(isize, positive = TRUE))
+ stop("If given, argument 'isize' must contain positive values only")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ if (mode(lmunb) != "character" && mode(lmunb) != "name")
+ lmunb = as.character(substitute(lmunb))
+ if (mode(lsize) != "character" && mode(lsize) != "name")
+ lsize = as.character(substitute(lsize))
+ if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
+ lpobs0 = as.character(substitute(lpobs0))
+
+ if (!is.list(epobs0)) epobs0 = list()
+ if (!is.list(emunb)) emunb = list()
+ if (!is.list(esize)) esize = list()
+
+
+
+ new("vglmff",
+ blurb = c("Zero-altered negative binomial (Bernoulli and\n",
+ "positive-negative binomial conditional model)\n\n",
+ "Links: ",
+ namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
+ namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
+ namesof("size", lsize, earg = esize, tag = FALSE), "\n",
+ "Mean: (1 - pobs0) * munb / (1 - (size / (size + ",
+ "munb))^size)"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 3
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 3
+ y <- as.matrix(y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ M = Musual * ncoly #
+
+ if (any(y != round(y)))
+ stop("the response must be integer-valued")
+ if (any(y < 0))
+ stop("the response must not have negative values")
+
+ mynames1 = if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
+ mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ predictors.names =
+ c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
+ namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
+ namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
+ interleave.VGAM(Musual*NOS, M = Musual)]
+
+
+ extra$y0 = y0 = ifelse(y == 0, 1, 0)
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+
+
+ if (!length(etastart)) {
+ mu.init = y
+ for(iii in 1:ncol(y)) {
+ index.posy = (y[, iii] > 0)
+ use.this = if ( .imethod == 2) {
+ weighted.mean(y[index.posy, iii], w[index.posy])
+ } else {
+ median(rep(y[index.posy, iii], w[index.posy])) + 1/2
+ }
+ mu.init[ index.posy, iii] = (1 - .sinit ) * y[index.posy, iii] +
+ .sinit * use.this
+ mu.init[!index.posy, iii] = use.this
+ max.use.this = 7 * use.this + 10
+ vecTF = (mu.init[, iii] > max.use.this)
+ if (any(vecTF))
+ mu.init[vecTF, iii] = max.use.this
+ }
+
+ pnb0 = matrix(if (length( .ipobs0 )) .ipobs0 else -1,
+ nrow = n, ncol = NOS, byrow = TRUE)
+ for(spp. in 1:NOS) {
+ if (any(pnb0[, spp.] < 0)) {
+ index.y0 = y[, spp.] < 0.5
+ pnb0[, spp.] = max(min(sum(index.y0)/n, 0.97), 0.03)
+ }
+ }
+
+
+ if ( is.Numeric( .isize )) {
+ kmat0 = matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
+ } else {
+ posnegbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
+ munb = extraargs
+ sum(w * dposnegbin(x = y, munb = munb, size = kmat,
+ log = TRUE))
+ }
+ k.grid = 2^((-6):6)
+ kmat0 = matrix(0, nrow = n, ncol = NOS)
+ for(spp. in 1:NOS) {
+ index.posy = y[, spp.] > 0
+ posy = y[index.posy, spp.]
+ kmat0[, spp.] = getMaxMin(k.grid,
+ objfun = posnegbinomial.Loglikfun,
+ y = posy, x = x[index.posy,],
+ w = w[index.posy],
+ extraargs = mu.init[index.posy, spp.])
+ }
+ }
+
+ etastart = cbind(theta2eta(pnb0, .lpobs0 , earg = .epobs0 ),
+ theta2eta(mu.init, .lmunb , earg = .emunb ),
+ theta2eta(kmat0, .lsize , earg = .esize ))
+ etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ } # End of if (!length(etastart))
+
+
+ }), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize,
+ .ipobs0 = ipobs0, .isize = isize,
+ .imethod = imethod, .sinit = shrinkage.init ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- 3
+ NOS <- extra$NOS
+ phi0 <- eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
+ kmat <- eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
+ pnb0 <- (kmat / (kmat + munb))^kmat # p(0) from negative binomial
+ (1 - phi0) * munb / (1 - pnb0)
+ }, list( .lpobs0 = lpobs0, .lsize = lsize, .lmunb = lmunb,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+ last = eval(substitute(expression({
+ misc$link =
+ c(rep( .lpobs0 , length = NOS),
+ rep( .lmunb , length = NOS),
+ rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
+ M = Musual)]
+ temp.names = c(mynames1,
+ mynames2,
+ mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ names(misc$link) = temp.names
+
+ misc$earg = vector("list", Musual*NOS)
+ names(misc$earg) = temp.names
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-2]] = .epobs0
+ misc$earg[[Musual*ii-1]] = .emunb
+ misc$earg[[Musual*ii ]] = .esize
+ }
+
+ misc$nsimEIM = .nsimEIM
+ misc$imethod = .imethod
+ misc$ipobs0 = .ipobs0
+ misc$isize = .isize
+ }), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize,
+ .ipobs0 = ipobs0, .isize = isize,
+ .nsimEIM = nsimEIM,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ NOS = extra$NOS
+ Musual <- 3
+ phi0 = eta2theta(eta[, Musual*(1:NOS)-2], .lpobs0 , earg = .epobs0 )
+ munb = eta2theta(eta[, Musual*(1:NOS)-1], .lmunb , earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
+ log = TRUE))
+ }
+ }, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
+ .epobs0 = epobs0, .emunb = emunb, .esize = esize ))),
+ vfamily = c("zanegbinomial"),
+ deriv = eval(substitute(expression({
+ Musual <- 3
+ NOS = extra$NOS
+ y0 = extra$y0
+
+ phi0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ .lpobs0 , earg = .epobs0 )
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+ skip = extra$skip.these
+
+
+ dphi0.deta = dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
+ dmunb.deta = dtheta.deta(munb, .lmunb , earg = .emunb )
+ dsize.deta = dtheta.deta(kmat, .lsize , earg = .esize )
+
+
+ tempk = kmat / (kmat + munb)
+ tempm = munb / (kmat + munb)
+ prob0 = tempk^kmat
+ oneminusf0 = 1 - prob0
+ df0.dmunb = -tempk * prob0
+ df0.dkmat = prob0 * (tempm + log(tempk))
+
+
+ dl.dphi0 = -1 / (1 - phi0)
+ dl.dmunb = y / munb - (y + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize = digamma(y + kmat) - digamma(kmat) -
+ (y + kmat)/(munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
+
+
+
+ dl.dphi0[y == 0] = 1 / phi0[y == 0] # Do it in one line
+ skip = extra$skip.these
+ for(spp. in 1:NOS) {
+ dl.dsize[skip[, spp.], spp.] =
+ dl.dmunb[skip[, spp.], spp.] = 0
+ }
+
+ dl.deta23 = c(w) * cbind(dl.dmunb * dmunb.deta,
+ dl.dsize * dsize.deta)
+
+
+ muphi0 = phi0
+ dl.deta1 = if ( .lpobs0 == "logit") {
+ c(w) * (y0 - muphi0)
+ } else {
+ c(w) * dphi0.deta * (y0 / muphi0 - 1) / (1 - muphi0)
+ }
+ ans = cbind(dl.deta1, dl.deta23)
+ ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lpobs0 = lpobs0 , .lmunb = lmunb , .lsize = lsize ,
+ .epobs0 = epobs0 , .emunb = emunb , .esize = esize ))),
+
+ weight = eval(substitute(expression({
+
+ six = dimm(Musual)
+ wz =
+ run.varcov = matrix(0.0, n, six*NOS-1)
+ Musualm1 = Musual - 1
+
+
+
+
+
+
+
+ ind2 = iam(NA, NA, M = Musual - 1, both = TRUE, diag = TRUE)
+
+
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rzanegbin(n = n*NOS, pobs0 = phi0,
+ size = kmat, mu = munb)
+ dim(ysim) = c(n, NOS)
+
+
+
+
+ dl.dphi0 = -1 / (1 - phi0)
+ dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat) +
+ df0.dmunb / oneminusf0
+ dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
+ (ysim + kmat)/(munb + kmat) + 1 + log(tempk) +
+ df0.dkmat / oneminusf0
+
+
+
+
+ dl.dphi0[ysim == 0] = 1 / phi0[ysim == 0] # Do it in one line
+ ysim0 = ifelse(ysim == 0, 1, 0)
+ skip.sim = matrix(as.logical(ysim0), n, NOS)
+ for(spp. in 1:NOS) {
+ dl.dsize[skip.sim[, spp.], spp.] =
+ dl.dmunb[skip.sim[, spp.], spp.] = 0
+ }
+
+
+ for(kk in 1:NOS) {
+ temp2 = cbind(dl.dmunb[, kk] * dmunb.deta[, kk],
+ dl.dsize[, kk] * dsize.deta[, kk])
+ small.varcov = temp2[, ind2$row.index] *
+ temp2[, ind2$col.index]
+
+
+
+
+ run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] =
+ run.varcov[, ((kk-1)*Musual+2):(kk*Musual)] +
+ c(small.varcov[, 1:Musualm1])
+ run.varcov[, M + (kk-1)*Musual + 2] =
+ run.varcov[, M + (kk-1)*Musual + 2] +
+ c(small.varcov[, Musualm1 + 1])
+ } # kk; end of NOS
+ } # ii; end of nsimEIM
+
+
+ run.varcov = cbind(run.varcov / .nsimEIM )
+ run.varcov = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+
+
+
+ wzind1 = sort(c( Musual*(1:NOS) - 1,
+ Musual*(1:NOS) - 0,
+ M + Musual*(1:NOS) - 1))
+ wz[, wzind1] = c(w) * run.varcov[, wzind1]
+
+
+
+
+ tmp100 = muphi0 * (1 - muphi0)
+ tmp200 = if ( .lpobs0 == "logit") {
+ cbind(c(w) * tmp100)
+ } else {
+ c(w) * cbind(dphi0.deta^2 / tmp100)
+ }
+ for(ii in 1:NOS) {
+ index200 = abs(tmp200[, ii]) < .Machine$double.eps
+ if (any(index200)) {
+ tmp200[index200, ii] = .Machine$double.eps # Diagonal 0's are bad
+ }
+ }
+ wz[, Musual*(1:NOS)-2] = tmp200
+
+
+ wz
+ }), list( .lpobs0 = lpobs0,
+ .epobs0 = epobs0,
+ .nsimEIM = nsimEIM ))))
+} # End of zanegbinomial()
+
+
+
+
+
+
+
+
+
+
+ # NOTE(review): dead code -- the 'if (FALSE)' guard below disables this
+ # definition of rposnegbin; it is kept only for reference.  It draws
+ # positive (zero-truncated) negative binomial deviates by rejection:
+ # sample from rnbinom and redraw any zeros until none remain.
+ if (FALSE)
+rposnegbin = function(n, munb, size) {
+ # 'size' and 'munb' must be positive; 'n' a single positive integer
+ # (is.Numeric is a VGAM helper).
+ if (!is.Numeric(size, positive = TRUE))
+ stop("argument 'size' must be positive")
+ if (!is.Numeric(munb, positive = TRUE))
+ stop("argument 'munb' must be positive")
+ if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
+ allowable.length = 1))
+ stop("argument 'n' must be a positive integer")
+ ans = rnbinom(n=n, mu = munb, size=size)
+ # Recycle parameters to length n so the rejection loop below can
+ # index them elementwise.
+ munb = rep(munb, length = n)
+ size = rep(size, length = n)
+ index = ans == 0
+ # Redraw the zero entries until every deviate is positive.
+ while(any(index)) {
+ more = rnbinom(n=sum(index), mu = munb[index], size=size[index])
+ ans[index] = more
+ index = ans == 0
+ }
+ ans
+}
+
+ # NOTE(review): dead code -- disabled by the 'if (FALSE)' guard; kept
+ # for reference.  Density of the positive (zero-truncated) negative
+ # binomial: dnbinom renormalised by 1 - P(Y = 0), with zero mass at 0.
+ if (FALSE)
+dposnegbin = function(x, munb, size, log = FALSE) {
+ if (!is.Numeric(size, positive = TRUE))
+ stop("argument 'size' must be positive")
+ if (!is.Numeric(munb, positive = TRUE))
+ stop("argument 'munb' must be positive")
+ ans = dnbinom(x = x, mu = munb, size=size, log=log)
+ # ans0 = P(Y = 0) under the untruncated negative binomial.
+ ans0 = dnbinom(x=0, mu = munb, size=size, log = FALSE)
+ ans = if (log) ans - log1p(-ans0) else ans/(1-ans0)
+ # x == 0 has zero probability under the truncated distribution.
+ ans[x == 0] = if (log) -Inf else 0
+ ans
+}
+
+
+
+
+
+
+
+
+
+
+
+ # VGAM family function for the zero-inflated Poisson (ZIP) model.
+ # Two linear predictors per response: (pstr0, lambda), where
+ # misc$pobs0 below records P(Y=0) = pstr0 + (1 - pstr0)*exp(-lambda)
+ # and the fitted mean is (1 - pstr0) * lambda.
+ # NOTE(review): the interleaved '-'/'+' lines are the upstream diff
+ # renaming the old 'probp'/'lprobp' parameterisation to 'pstr0'.
+ zipoisson = function(lpstr0 = "logit", llambda = "loge",
+ epstr0 = list(), elambda = list(),
+ ipstr0 = NULL, ilambda = NULL,
+ imethod = 1,
+ shrinkage.init = 0.8, zero = NULL)
{
- lprobp. <- lprobp
- eprobp. <- eprobp
- iprobp. <- iprobp
+ if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
+ lpstr0 = as.character(substitute(lpstr0))
if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda <- as.character(substitute(llambda))
- if (mode(lprobp.) != "character" && mode(lprobp.) != "name")
- lphi <- as.character(substitute(lprobp.))
+ llambda = as.character(substitute(llambda))
- if (is.Numeric(ilambda))
- if (!is.Numeric(ilambda, posit = TRUE))
- stop("'ilambda' values must be positive")
- if (is.Numeric(iprobp.))
- if (!is.Numeric(iprobp., posit = TRUE) ||
- any(iprobp. >= 1))
- stop("'iprobp' values must be inside the interval (0,1)")
- if (!is.list(elambda)) elambda <- list()
- if (!is.list(eprobp.)) eprobp. <- list()
+ lpstr00 <- lpstr0
+ epstr00 <- epstr0
+ ipstr00 <- ipstr0
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 2)
+ if (length(ipstr00))
+ if (!is.Numeric(ipstr00, positive = TRUE) ||
+ any(ipstr00 >= 1))
+ stop("argument 'ipstr0' values must be inside the interval (0,1)")
+ if (length(ilambda))
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("argument 'ilambda' values must be positive")
+
+ if (!is.list(epstr00)) epstr00 = list()
+ if (!is.list(elambda)) elambda = list()
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(shrinkage.init, allow = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
+
new("vglmff",
blurb = c("Zero-inflated Poisson\n\n",
"Links: ",
- namesof("lambda", llambda, earg = elambda), ", ",
- namesof("probp", lprobp., earg = eprobp.), "\n",
- "Mean: probp * lambda"),
+ namesof("pstr0", lpstr00, earg = epstr00 ), ", ",
+ namesof("lambda", llambda, earg = elambda ), "\n",
+ "Mean: (1 - pstr0) * lambda"),
+
constraints = eval(substitute(expression({
dotzero <- .zero
Musual <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(Musual = 2,
zero = .zero)
}, list( .zero = zero ))),
+ # initialize: validate the (possibly multi-column) integer response and
+ # build etastart, grid-searching pstr0 over seq(0.02, 0.98) via getMaxMin.
initialize = eval(substitute(expression({
- y <- cbind(y)
-
+ y <- as.matrix(y)
ncoly <- ncol(y)
+
Musual <- 2
extra$ncoly <- ncoly
extra$Musual <- Musual
M <- Musual * ncoly
- if (any(round(y) != y))
- stop("responses must be integer-valued")
+ if (any(round(y) != y))
+ stop("integer-valued responses only allowed for ",
+ "the 'zipoisson' family")
- mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
- mynames2 <- paste("probp", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames1 <- paste("pstr0", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .llambda, earg = .elambda, tag = FALSE),
- namesof(mynames2, .lprobp., earg = .eprobp., tag = FALSE))
+ c(namesof(mynames1, .lpstr00 , earg = .epstr00 , tag = FALSE),
+ namesof(mynames2, .llambda , earg = .elambda , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
- predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
- if (!length(etastart)) {
- mat1 <- matrix(if (length( .ilambda )) theta2eta( .ilambda,
- .llambda, earg = .elambda) else 0,
- n, ncoly, byrow = TRUE)
- mat2 <- matrix(if (length( .iprobp. )) theta2eta( .iprobp.,
- .lprobp., earg = .eprobp.) else 0,
- n, ncoly, byrow = TRUE)
+ if (!length(etastart)) {
- for (jay in 1:ncoly) {
- yjay <- y[, jay]
+ matL <- matrix(if (length( .ilambda )) .ilambda else 0,
+ n, ncoly, byrow = TRUE)
+ matP <- matrix(if (length( .ipstr00 )) .ipstr00 else 0,
+ n, ncoly, byrow = TRUE)
- Phi.init <- 0.75 * sum(w[yjay > 0]) / sum(w)
- Phi.init[Phi.init <= 0.02] = 0.02 # Last resort
- Phi.init[Phi.init >= 0.98] = 0.98 # Last resort
- if ( length(mustart)) {
- mustart <- matrix(mustart, n, ncoly) # Make sure right size
- Lambda.init <- mustart / (1 - Phi.init)
- } else if ( .imethod == 2) {
- mymean <- weighted.mean(yjay[yjay > 0], w[yjay > 0]) + 1/16
- Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * mymean
- } else {
- use.this <- median(yjay[yjay > 0]) + 1 / 16
- Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * use.this
- }
+ for (spp. in 1:ncoly) {
+ yvec <- y[, spp.]
- zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
- sum(w * dzipois(x = y, phi = phival,
- lambda = extraargs$lambda,
- log = TRUE))
- }
- phi.grid <- seq(0.02, 0.98, len = 21)
- Phimat.init <- getMaxMin(phi.grid,
- objfun = zipois.Loglikfun,
- y = y, x = x, w = w,
- extraargs = list(lambda = Lambda.init))
- if (length(mustart)) {
- Lambda.init <- Lambda.init / (1 - Phimat.init)
- }
+ Phi.init <- 1 - 0.85 * sum(w[yvec > 0]) / sum(w)
+ Phi.init[Phi.init <= 0.02] = 0.02 # Last resort
+ Phi.init[Phi.init >= 0.98] = 0.98 # Last resort
+
+ if ( length(mustart)) {
+ mustart <- matrix(mustart, n, ncoly) # Make sure right size
+ Lambda.init <- mustart / (1 - Phi.init)
+ } else if ( .imethod == 2) {
+ mymean <- weighted.mean(yvec[yvec > 0], w[yvec > 0]) + 1/16
+ Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * mymean
+ } else {
+ use.this <- median(yvec[yvec > 0]) + 1 / 16
+ Lambda.init <- (1 - .sinit) * (yvec + 1/8) + .sinit * use.this
+ }
+
+ zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
+ sum(w * dzipois(x = y, pstr0 = phival,
+ lambda = extraargs$lambda,
+ log = TRUE))
+ }
+ phi.grid <- seq(0.02, 0.98, len = 21)
+ Phimat.init <- getMaxMin(phi.grid,
+ objfun = zipois.Loglikfun,
+ y = y, x = x, w = w,
+ extraargs = list(lambda = Lambda.init))
+
+ if (length(mustart)) {
+ Lambda.init <- Lambda.init / (1 - Phimat.init)
+ }
+
+ if (!length( .ipstr00 ))
+ matP[, spp.] <- Phimat.init
+ if (!length( .ilambda ))
+ matL[, spp.] <- Lambda.init
+ } # spp.
+
+ etastart <- cbind(theta2eta(matP, .lpstr00, earg = .epstr00 ),
+ theta2eta(matL, .llambda, earg = .elambda ))[,
+ interleave.VGAM(M, M = Musual)]
+ mustart <- NULL # Since etastart has been computed.
+ } # End of !length(etastart)
+ }), list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda,
+ .ipstr00 = ipstr00, .ilambda = ilambda,
+ .imethod = imethod, .sinit = shrinkage.init ))),
+ # linkinv: fitted mean (1 - pstr0) * lambda; odd eta columns hold pstr0,
+ # even columns hold lambda (interleaved layout).
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ phimat = eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ (1 - phimat) * lambda
+ }, list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lpstr00 , length = ncoly),
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .epstr00
+ misc$earg[[Musual*ii ]] <- .elambda
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+
+ misc$pobs0 = phimat + (1 - phimat) * exp(-lambda) # P(Y=0)
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pobs0) = dimnames(y)
+
+ }), list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ phimat = eta2theta(eta[, c(TRUE, FALSE)], .lpstr00 , earg = .epstr00 )
+ lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x = y, pstr0 = phimat, lambda = lambda,
+ log = TRUE))
+ }
+ }, list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
+ vfamily = c("zipoisson"),
+ # deriv: score vector; the y == 0 cells use the mixture P(Y=0) = prob0,
+ # the positive cells use the ordinary Poisson terms.
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ phimat = eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
+ earg = .epstr00 )
+ lambda = eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+ earg = .elambda )
+
+ prob0 = phimat + (1 - phimat) * exp(-lambda)
+ index0 = as.matrix(y == 0)
+
+ dl.dphimat = -expm1(-lambda) / prob0
+ dl.dphimat[!index0] = -1 / (1 - phimat[!index0])
+ dl.dlambda = -(1 - phimat) * exp(-lambda) / prob0
+ dl.dlambda[!index0] = (y[!index0] - lambda[!index0]) / lambda[!index0]
+
+ dphimat.deta = dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+
+ ans = c(w) * cbind(dl.dphimat * dphimat.deta,
+ dl.dlambda * dlambda.deta)
+ ans <- ans[, interleave.VGAM(M, M = Musual)]
+
+
+ # Guard against tiny lambda under the loge link: use the numerically
+ # stable logit-like form w * (y - lambda) for the positive cells.
+ if ( .llambda == "loge" && is.empty.list( .elambda ) &&
+ any(lambda[!index0] < .Machine$double.eps)) {
+ for(spp. in 1:(M / Musual)) {
+ ans[!index0[, spp.], Musual * spp.] =
+ w[!index0[, spp.]] *
+ (y[!index0[, spp.], spp.] - lambda[!index0[, spp.], spp.])
+ }
+ }
+
+ ans
+ }), list( .lpstr00 = lpstr00, .llambda = llambda,
+ .epstr00 = epstr00, .elambda = elambda ))),
+ # weight: expected information, stored in matrix-band format via iam().
+ weight = eval(substitute(expression({
+ wz = matrix(0.0, nrow = n, ncol = M + M-1)
+
+ d2l.dphimat2 = -expm1(-lambda) / ((1 - phimat) * prob0)
+ d2l.dlambda2 = (1 - phimat) / lambda -
+ phimat * (1 - phimat) * exp(-lambda) / prob0
+ d2l.dphimatlambda = -exp(-lambda) / prob0
+
+ d2l.dphimat2 = as.matrix(d2l.dphimat2)
+ d2l.dlambda2 = as.matrix(d2l.dlambda2)
+ d2l.dphimatlambda = as.matrix(d2l.dphimatlambda)
+
+ for (ii in 1:(M / Musual)) {
+ wz[, iam(Musual * ii - 1, Musual * ii - 1, M)] <-
+ d2l.dphimat2[, ii] * dphimat.deta[, ii]^2
+ wz[, iam(Musual * ii , Musual * ii , M)] <-
+ d2l.dlambda2[, ii] * dlambda.deta[, ii]^2
+ wz[, iam(Musual * ii - 1, Musual * ii , M)] <-
+ d2l.dphimatlambda[, ii] * dphimat.deta[, ii] * dlambda.deta[, ii]
+
+ }
+
+
+
+ c(w) * wz
+ }), list( .llambda = llambda, .elambda = elambda ))))
+} # zipoisson
+
+
+
+
+
+
+
+
+
+ # VGAM family function for the zero-inflated binomial.
+ # Linear predictors: (pstr0, prob); misc$pobs0 below records
+ # P(Y=0) = pstr0 + (1 - pstr0)*(1 - prob)^w in the intercept-only case.
+ # Only a single response is supported (mv must be FALSE).
+ zibinomial = function(lpstr0 = "logit", lprob = "logit",
+ epstr0 = list(), eprob = list(),
+ ipstr0 = NULL,
+ zero = 1, mv = FALSE, imethod = 1)
+{
+ if (as.logical(mv))
+ stop("argument 'mv' must be FALSE")
+
+ if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
+ lpstr0 = as.character(substitute(lpstr0))
+ if (mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+
+ if (is.Numeric(ipstr0))
+ if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
+ stop("'ipstr0' values must be inside the interval (0,1)")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (!is.list(epstr0)) epstr0 = list()
+ if (!is.list(eprob )) eprob = list()
+
+
+ new("vglmff",
+ blurb = c("Zero-inflated binomial\n\n",
+ "Links: ",
+ namesof("pstr0", lpstr0, earg = epstr0), ", ",
+ namesof("prob" , lprob , earg = eprob ), "\n",
+ "Mean: (1 - pstr0) * prob / (1 - (1 - prob)^w)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ # initialize: binomialff-style response handling (0/1 vector, factor,
+ # or 2-column successes/failures matrix), then a moment-style pstr0 start.
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w = w
+
+
+ {
+ NCOL = function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+
+ if (NCOL(y) == 1) {
+ if (is.factor(y)) y <- y != levels(y)[1]
+ nn = rep(1, n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1.0 + w)
+
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y = round(y)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w = w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor ",
+ "(first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
+
+ }
+
+
+
+
+
+ predictors.names =
+ c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
+ namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
+
+
+ # Starting value for pstr0: either user-supplied, or estimated from
+ # the observed proportion of zeros (adjusted for binomial zeros when
+ # imethod == 1), then clamped into a safe range.
+ phi.init = if (length( .ipstr0 )) .ipstr0 else {
+ prob0.est = sum(w[y == 0]) / sum(w)
+ if ( .imethod == 1) {
+ (prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
+ } else {
+ prob0.est
+ }
+ }
+
+ phi.init[phi.init <= -0.10] = 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] = 0.05 # Last resort
+ phi.init[phi.init >= 0.99] = 0.95 # Last resort
+
+ if ( length(mustart) && !length(etastart))
+ mustart = cbind(rep(phi.init, len = n),
+ mustart) # 1st coln not a real mu
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob,
+ .ipstr0 = ipstr0,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ phi = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
+ (1 - phi) * mubin
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+ last = eval(substitute(expression({
+ misc$link = c("pstr0" = .lpstr0 , "prob" = .lprob )
+ misc$earg = list("pstr0" = .epstr0 , "prob" = .eprob )
+ misc$imethod = .imethod
+
+
+ if (intercept.only && all(w == w[1])) {
+ phi = eta2theta(eta[1, 1], .lpstr0 , earg = .epstr0 )
+ mubin = eta2theta(eta[1, 2], .lprob , earg = .eprob )
+ misc$pobs0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
+ }
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob,
+ .imethod = imethod ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ cbind(theta2eta(mu[, 1], .lpstr0 , earg = .epstr0 ),
+ theta2eta(mu[, 2], .lprob , earg = .eprob ))
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ pstr0 = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(dzibinom(x = round(w * y), size = w, prob = mubin,
+ log = TRUE, pstr0 = pstr0))
+ }
+ }, list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+ vfamily = c("zibinomial"),
+ # deriv: score vector; y == 0 cells use the mixture zero-probability
+ # tmp8, positive cells use the ordinary binomial terms.
+ deriv = eval(substitute(expression({
+ phi = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
+
+ prob0 = (1 - mubin)^w # Actually q^w
+ tmp8 = phi + (1 - phi) * prob0
+ index = (y == 0)
+ dl.dphi = (1 - prob0) / tmp8
+ dl.dphi[!index] = -1 / (1 - phi[!index])
+ dl.dmubin = -w * (1 - phi) * (1 - mubin)^(w - 1) / tmp8
+ dl.dmubin[!index] = w[!index] *
+ (y[!index] / mubin[!index] -
+ (1 - y[!index]) / (1 - mubin[!index]))
+ dphi.deta = dtheta.deta(phi, .lpstr0 , earg = .epstr0 )
+ dmubin.deta = dtheta.deta(mubin, .lprob , earg = .eprob )
+ ans = cbind(dl.dphi * dphi.deta,
+ dl.dmubin * dmubin.deta)
+ if ( .lprob == "logit") {
+ ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
+ }
+ ans
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))),
+ # weight: expected information in band format; the (2,2) entry is
+ # floored at .Machine$double.eps to avoid a singular working weight.
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+
+
+
+ d2l.dphi2 = (1 - prob0) / ((1 - phi) * tmp8)
+
+
+ d2l.dphimubin = -w * (1 - mubin)^(w - 1) / tmp8
+
+
+
+
+ d2l.dmubin2 = w * (1 - phi) *
+ (1 / (mubin * (1 - mubin)) -
+ (tmp8 * (w - 1) * (1 - mubin)^(w - 2) -
+ (1 - phi) * w * (1 - mubin)^(2*(w - 1))) / tmp8)
+
+
+ wz[,iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
+ wz[,iam(2,2,M)] = d2l.dmubin2 * dmubin.deta^2
+ wz[,iam(1,2,M)] = d2l.dphimubin * dphi.deta * dmubin.deta
+ if (TRUE) {
+ ind6 = (wz[,iam(2,2,M)] < .Machine$double.eps)
+ if (any(ind6))
+ wz[ind6,iam(2,2,M)] = .Machine$double.eps
+ }
+ wz
+ }), list( .lpstr0 = lpstr0, .lprob = lprob,
+ .epstr0 = epstr0, .eprob = eprob ))))
+}
+
+
+
+
+
+
+
+
+
+
+dzibinom = function(x, size, prob, pstr0 = 0, log = FALSE) {
+  # Density of the zero-inflated binomial: a point mass 'pstr0' at zero
+  # mixed with a Binomial(size, prob) component.  Slightly negative
+  # 'pstr0' (zero-deflation) is tolerated down to the deflation limit
+  # -(1-prob)^size / (1 - (1-prob)^size); values outside
+  # [deflation limit, 1] yield NaN.
+  if (!is.logical(log.arg <- log))
+    stop("bad input for argument 'log'")
+  rm(log)
+
+  # Recycle every argument to a common length.
+  nn <- max(length(x), length(size), length(prob), length(pstr0))
+  x     <- rep(x,     len = nn)
+  size  <- rep(size,  len = nn)
+  prob  <- rep(prob,  len = nn)
+  pstr0 <- rep(pstr0, len = nn)
+
+  # Log-density of the pure binomial component.
+  logdbin <- dbinom(x = x, size = size, prob = prob, log = TRUE)
+
+  dens <- if (log.arg) {
+    ifelse(x == 0,
+           log(pstr0 + (1 - pstr0) * exp(logdbin)),
+           log1p(-pstr0) + logdbin)
+  } else {
+    ifelse(x == 0,
+           pstr0 + (1 - pstr0) * exp(logdbin),
+           (1 - pstr0) * exp(logdbin))
+  }
+
+  # Invalid pstr0 values map to NaN.
+  prob0 <- (1 - prob)^size
+  deflat_limit <- -prob0 / (1 - prob0)
+  dens[pstr0 < deflat_limit] <- NaN
+  dens[pstr0 > 1] <- NaN
+
+  dens
+}
+
+
+pzibinom = function(q, size, prob, pstr0 = 0,
+                    lower.tail = TRUE, log.p = FALSE) {
+  # CDF of the zero-inflated binomial: mixture of a point mass 'pstr0'
+  # at zero and a Binomial(size, prob).  pstr0 outside
+  # [-prob0/(1 - prob0), 1] (prob0 = (1-prob)^size) yields NaN.
+  # BUG FIX: the old code forwarded 'lower.tail'/'log.p' straight to
+  # pbinom() and then applied pstr0 + (1 - pstr0) * ans, which is only
+  # valid on the lower-tail, natural-probability scale.  Compute on that
+  # scale first, then transform at the end.
+
+  LLL = max(length(pstr0), length(size), length(prob), length(q))
+  if (length(q) != LLL) q = rep(q, len = LLL);
+  if (length(size) != LLL) size = rep(size, len = LLL);
+  if (length(prob) != LLL) prob = rep(prob, len = LLL);
+  if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+
+  ans = pbinom(q, size, prob)  # lower tail, natural scale
+  ans = ifelse(q < 0, 0, pstr0 + (1 - pstr0) * ans)
+
+  # Invalid pstr0 values map to NaN (NaN survives the transforms below).
+  prob0 = (1 - prob)^size
+  deflat_limit = -prob0 / (1 - prob0)
+  ans[pstr0 < deflat_limit] = NaN
+  ans[pstr0 > 1] = NaN
+
+  if (!lower.tail) ans = 1 - ans
+  if (log.p) ans = log(ans)
+
+  ans
+}
+
+
+qzibinom = function(p, size, prob, pstr0 = 0,
+                    lower.tail = TRUE, log.p = FALSE) {
+  # Quantile function of the zero-inflated binomial.
+  # BUG FIXES:
+  # (1) the old code compared raw 'p' with 'pstr0' while ALSO forwarding
+  #     'lower.tail'/'log.p' to qbinom(), applying them twice / on the
+  #     wrong scale.  Normalise 'p' to the lower-tail natural scale once,
+  #     up front.
+  # (2) in the zero-deflated branch, 'pobs0' had length sum(ind0) but was
+  #     compared against full-length vectors ('ans[p[ind0] <= pobs0]',
+  #     'ind0 & (p > pobs0)'), silently recycling the short vector.
+  #     'pobs0' is now computed at full length.
+  if (log.p) p <- exp(p)
+  if (!lower.tail) p <- 1 - p
+
+  LLL = max(length(p), length(size), length(prob), length(pstr0))
+  p = rep(p, length = LLL)
+  size = rep(size, length = LLL)
+  prob = rep(prob, length = LLL)
+  pstr0 = rep(pstr0, length = LLL)
+
+  # Invert the mixture CDF: mass below pstr0 maps to 0; the remainder is
+  # the binomial quantile of the rescaled probability.
+  ans = p
+  ans[p <= pstr0] = 0
+  ans[p > pstr0] =
+    qbinom((p[p > pstr0] - pstr0[p > pstr0]) / (1 - pstr0[p > pstr0]),
+           size[p > pstr0],
+           prob[p > pstr0])
+
+  # Zero-deflated case: -prob0/(1 - prob0) <= pstr0 < 0.
+  prob0 = (1 - prob)^size
+  deflat_limit = -prob0 / (1 - prob0)
+  ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+  if (any(ind0)) {
+    pobs0 = pstr0 + (1 - pstr0) * prob0  # P(Y = 0), full length
+    ans[ind0 & (p <= pobs0)] = 0
+    pindex = (1:LLL)[ind0 & (p > pobs0)]
+    ans[pindex] = qposbinom((p[pindex] - pobs0[pindex]) / (1 - pobs0[pindex]),
+                            size = size[pindex],
+                            prob = prob[pindex])
+  }
+
+  ans[pstr0 < deflat_limit] = NaN
+  ans[pstr0 > 1] = NaN
+
+  ans
+}
+
+
+# Random deviates from the zero-inflated binomial: binomial draws with a
+# fraction 'pstr0' forced to zero.  The zero-deflated case
+# (deflat_limit <= pstr0 < 0) instead mixes a point mass at 0 with
+# positive-binomial draws via rposbinom (a VGAM helper).
+rzibinom = function(n, size, prob, pstr0 = 0) {
+ # 'n' may be a vector (its length is used, as with base r* functions)
+ # or a single positive integer.
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ pstr0 = rep(pstr0, len = use.n)
+ size = rep(size, len = use.n)
+ prob = rep(prob, len = use.n)
+
+ # Inflation: each draw is zeroed with probability pstr0 (a no-op for
+ # pstr0 <= 0; the deflated entries are overwritten below).
+ ans = rbinom(use.n, size, prob)
+ ans[runif(use.n) < pstr0] <- 0
+
+
+
+ prob0 = (1 - prob)^size
+ deflat_limit = -prob0 / (1 - prob0)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ # pobs0 = P(Y = 0) under the deflated mixture.
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] = rposbinom(sum(ind0), size = size[ind0], prob = prob[ind0])
+ ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ }
+
+ # pstr0 outside [deflation limit, 1] is invalid -> NaN.
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+
+ ans
+}
+
+
+
+
+
+
+
+
+
+
+
+
+# Density of the zero-inflated negative binomial: point mass 'pstr0' at
+# zero mixed with NB(size, prob).  Exactly one of 'prob'/'munb' should be
+# supplied; 'munb' is the NB mean parameterisation.  pstr0 outside
+# [-prob0/(1 - prob0), 1] (prob0 = prob^size) yields NaN.
+dzinegbin = function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
+ log = FALSE) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
+
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ # Recycle every argument to a common length.
+ LLL = max(length(pstr0), length(size), length(prob), length(x))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+
+
+ ans = dnbinom(x = x, size = size, prob = prob, log = log.arg)
+
+ # Mix in the zero inflation; on the log scale the nonzero cells use
+ # the numerically stable log1p(-pstr0) + log-density form.
+ ans = if (log.arg)
+ ifelse(x == 0, log(pstr0+(1-pstr0)*exp(ans)), log1p(-pstr0) + ans) else
+ ifelse(x == 0, pstr0+(1-pstr0)* ans, (1-pstr0) * ans)
+
+
+
+ prob0 = prob^size
+ deflat_limit = -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+
+ ans
+}
+
+
+pzinegbin = function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
+  # CDF of the zero-inflated negative binomial: a point mass 'pstr0' at
+  # zero mixed with NB(size, prob).  Exactly one of 'prob'/'munb' should
+  # be supplied; 'munb' is the NB mean parameterisation.  pstr0 outside
+  # [-prob0/(1 - prob0), 1] (prob0 = prob^size) yields NaN.
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size / (size + munb)
+  }
+
+  # Recycle every argument to a common length.
+  nn <- max(length(pstr0), length(size), length(prob), length(q))
+  q     <- rep(q,     len = nn)
+  size  <- rep(size,  len = nn)
+  prob  <- rep(prob,  len = nn)
+  pstr0 <- rep(pstr0, len = nn)
+
+  # Mixture CDF; identically zero below the support.
+  nb.cdf <- pnbinom(q = q, size = size, prob = prob)
+  ans <- ifelse(q < 0, 0, pstr0 + (1 - pstr0) * nb.cdf)
+
+  # Invalid pstr0 values map to NaN.
+  prob0 <- prob^size
+  deflat_limit <- -prob0 / (1 - prob0)
+  ans[pstr0 < deflat_limit] <- NaN
+  ans[pstr0 > 1] <- NaN
+
+  ans
+}
+
+
+qzinegbin = function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
+  # Quantile function of the zero-inflated negative binomial.  Exactly
+  # one of 'prob'/'munb' should be supplied; 'munb' is the NB mean
+  # parameterisation.
+  # BUG FIX: in the zero-deflated branch the old code built 'pobs0' only
+  # for the deflated elements (length sum(ind0)) but compared it against
+  # full-length vectors ('ans[p[ind0] <= pobs0]', 'ind0 & (p > pobs0)'),
+  # so the short vector was silently recycled and the wrong elements
+  # could be overwritten.  'pobs0' is now computed at full length.
+  if (length(munb)) {
+    if (length(prob))
+      stop("arguments 'prob' and 'munb' both specified")
+    prob <- size/(size + munb)
+  }
+  LLL = max(length(p), length(prob), length(pstr0), length(size))
+  if (length(p) != LLL) p = rep(p, len = LLL)
+  if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+  if (length(prob) != LLL) prob = rep(prob, len = LLL)
+  if (length(size) != LLL) size = rep(size, len = LLL);
+
+  # Invert the mixture CDF: mass below pstr0 maps to 0; the remainder is
+  # the NB quantile of the rescaled probability.
+  ans = p
+  ind4 = (p > pstr0)
+  ans[!ind4] = 0
+  ans[ ind4] = qnbinom(p = (p[ind4] - pstr0[ind4]) / (1 - pstr0[ind4]),
+                       size = size[ind4], prob = prob[ind4])
+
+  # Zero-deflated case: -prob0/(1 - prob0) <= pstr0 < 0, handled via the
+  # positive negative binomial quantile function (VGAM's qposnegbin).
+  prob0 = prob^size
+  deflat_limit = -prob0 / (1 - prob0)
+  ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+  if (any(ind0)) {
+    pobs0 = pstr0 + (1 - pstr0) * prob0  # P(Y = 0), full length
+    ans[ind0 & (p <= pobs0)] = 0
+    pindex = (1:LLL)[ind0 & (p > pobs0)]
+    ans[pindex] = qposnegbin((p[pindex] - pobs0[pindex]) / (1 - pobs0[pindex]),
+                             size = size[pindex],
+                             prob = prob[pindex])
+  }
+
+  ans[pstr0 < deflat_limit] = NaN
+  ans[pstr0 > 1] = NaN
+
+  ans
+}
+
+
+# Random deviates from the zero-inflated negative binomial: NB draws with
+# a fraction 'pstr0' forced to zero.  Exactly one of 'prob'/'munb' should
+# be supplied.  The zero-deflated case (deflat_limit <= pstr0 < 0) mixes
+# a point mass at 0 with positive-NB draws via rposnegbin (a VGAM helper).
+rzinegbin = function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+ if (length(munb)) {
+ if (length(prob))
+ stop("arguments 'prob' and 'munb' both specified")
+ prob <- size / (size + munb)
+ }
+
+ # 'n' may be a vector (its length is used, as with base r* functions)
+ # or a single positive integer.
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+
+ pstr0 = rep(pstr0, len = use.n)
+ size = rep(size, len = use.n)
+ prob = rep(prob, len = use.n)
+
+
+ # Inflation: each draw is zeroed with probability pstr0 (a no-op for
+ # pstr0 <= 0; the deflated entries are overwritten below).
+ ans = rnbinom(n = use.n, size = size, prob = prob)
+ ans = ifelse(runif(use.n) < pstr0, rep(0, use.n), ans)
+
+
+
+ prob0 = rep(prob^size, len = use.n)
+ deflat_limit = -prob0 / (1 - prob0)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0, na.rm = TRUE)) {
+ # pobs0 = P(Y = 0) under the deflated mixture.
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] = rposnegbin(sum(ind0, na.rm = TRUE), size = size[ind0],
+ prob = prob[ind0])
+ ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ }
+
+ # pstr0 outside [deflation limit, 1] is invalid -> NaN.
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+ ans
+}
+
+
+
+
+
+
+
+# Control function for zinegbinomial(): the working weights come from
+# simulation (nsimEIM), so they are saved by default.
+zinegbinomial.control <- function(save.weight = TRUE, ...) {
+  list(save.weight = save.weight)
+}
+
+
+ zinegbinomial =
+ function(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+ epstr0 = list(), emunb = list(), esize = list(),
+ ipstr0 = NULL, isize = NULL,
+ zero = c(-1, -3),
+ imethod = 1, shrinkage.init = 0.95,
+ nsimEIM = 250)
+{
+
+ if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
+ lpstr0 = as.character(substitute(lpstr0))
+ if (mode(lmunb) != "character" && mode(lmunb) != "name")
+ lmunb = as.character(substitute(lmunb))
+ if (mode(lsize) != "character" && mode(lsize) != "name")
+ lsize = as.character(substitute(lsize))
+
+
+ if (length(ipstr0) &&
+ (!is.Numeric(ipstr0, positive = TRUE) ||
+ any(ipstr0 >= 1)))
+ stop("argument 'ipstr0' must contain values in (0,1)")
+ if (length(isize) && !is.Numeric(isize, positive = TRUE))
+ stop("argument 'isize' must contain positive values only")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1, 2 or 3")
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+ stop("argument 'nsimEIM' must be a positive integer")
+ if (nsimEIM <= 50)
+ warning("argument 'nsimEIM' should be greater than 50, say")
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ if (!is.list(epstr0)) epstr0 = list()
+ if (!is.list(emunb)) emunb = list()
+ if (!is.list(esize)) esize = list()
+
+ new("vglmff",
+ blurb = c("Zero-inflated negative binomial\n\n",
+ "Links: ",
+ namesof("pstr0", lpstr0, earg = epstr0, tag = FALSE), ", ",
+ namesof("munb", lmunb, earg = emunb, tag = FALSE), ", ",
+ namesof("size", lsize, earg = esize, tag = FALSE), "\n",
+ "Mean: (1 - pstr0) * munb"),
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 3
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 3
+ y <- as.matrix(y)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ mynames1 = if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
+ mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
+ mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
+ predictors.names =
+ c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
+ namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
+ namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
+ interleave.VGAM(Musual*NOS, M = Musual)]
+
+ if (!length(etastart)) {
+ mum.init = if ( .imethod == 3) {
+ y + 1/16
+ } else {
+ mum.init = y
+ for(iii in 1:ncol(y)) {
+ index = (y[, iii] > 0)
+ mum.init[, iii] = if ( .imethod == 2)
+ weighted.mean(y[index, iii], w = w[index]) else
+ median(rep(y[index, iii], times = w[index])) + 1/8
+ }
+ (1 - .sinit) * (y + 1/16) + .sinit * mum.init
+ }
+
+
+ pstr0.init = if (length( .ipstr0 )) {
+ matrix( .ipstr0 , n, ncoly, byrow = TRUE)
+ } else {
+ pstr0.init = y
+ for(iii in 1:ncol(y))
+ pstr0.init[, iii] = sum(w[y[, iii] == 0]) / sum(w)
+ pstr0.init[pstr0.init <= 0.02] = 0.02 # Last resort
+ pstr0.init[pstr0.init >= 0.98] = 0.98 # Last resort
+ pstr0.init
+ }
+
+ kay.init =
+ if ( is.Numeric( .isize )) {
+ matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
+ } else {
+ zinegbin.Loglikfun = function(kval, y, x, w, extraargs) {
+ index0 = (y == 0)
+ pstr0vec = extraargs$pstr0
+ muvec = extraargs$mu
+
+
+ ans1 = 0.0
+ if (any( index0))
+ ans1 = ans1 + sum(w[ index0] *
+ dzinegbin(x = y[ index0], size = kval,
+ munb = muvec[ index0],
+ pstr0 = pstr0vec[ index0], log = TRUE))
+ if (any(!index0))
+ ans1 = ans1 + sum(w[!index0] *
+ dzinegbin(x = y[!index0], size = kval,
+ munb = muvec[!index0],
+ pstr0 = pstr0vec[!index0], log = TRUE))
+ ans1
+ }
+ k.grid = 2^((-6):6)
+ kay.init = matrix(0, nrow = n, ncol = NOS)
+ for(spp. in 1:NOS) {
+ kay.init[, spp.] = getMaxMin(k.grid,
+ objfun = zinegbin.Loglikfun,
+ y = y[, spp.], x = x, w = w,
+ extraargs = list(pstr0 = pstr0.init[, spp.],
+ mu = mum.init[, spp.]))
+ }
+ kay.init
+ }
+
+ etastart = cbind(theta2eta(pstr0.init, .lpstr0 , earg = .epstr0 ),
+ theta2eta(mum.init, .lmunb , earg = .emunb ),
+ theta2eta(kay.init, .lsize , earg = .esize ))
+ etastart =
+ etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ }
+ }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+ .ipstr0 = ipstr0, .isize = isize,
+ .sinit = shrinkage.init,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual = 3
+ NOS = extra$NOS
+ pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ fv.matrix = (1 - pstr0) * munb
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(dimnames(pstr0)[[1]], extra$dimnamesy2)
+ fv.matrix
+ }, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
+ .epstr0 = epstr0, .esize = esize, .emunb = emunb ))),
+ last = eval(substitute(expression({
+ misc$link =
+ c(rep( .lpstr0 , length = NOS),
+ rep( .lmunb , length = NOS),
+ rep( .lsize , length = NOS))[interleave.VGAM(Musual*NOS,
+ M = Musual)]
+ temp.names =
+ c(mynames1,
+ mynames2,
+ mynames3)[interleave.VGAM(Musual*NOS, M = Musual)]
+ names(misc$link) = temp.names
+
+ misc$earg = vector("list", Musual*NOS)
+ names(misc$earg) = temp.names
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-2]] = .epstr0
+ misc$earg[[Musual*ii-1]] = .emunb
+ misc$earg[[Musual*ii ]] = .esize
+ }
+
+ misc$imethod = .imethod
+ misc$nsimEIM = .nsimEIM
+ misc$expected = TRUE
+ misc$Musual = Musual
+ misc$ipstr0 = .ipstr0
+ misc$isize = .isize
+ if (intercept.only) {
+ pstr0.val = eta2theta(eta[1,Musual*(1:NOS)-2], .lpstr0 , earg= .epstr0 )
+ munb.val = eta2theta(eta[1,Musual*(1:NOS)-1], .lmunb , earg= .emunb )
+ kval = eta2theta(eta[1,Musual*(1:NOS) ], .lsize , earg= .esize )
+ misc$pobs0 = pstr0.val +
+ (1 - pstr0.val) * (kval / (kval + munb.val))^kval # P(Y=0)
+ }
+ }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize,
+ .ipstr0 = ipstr0, .isize = isize,
+ .nsimEIM = nsimEIM, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Musual <- 3
+ NOS = extra$NOS
+ pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzinegbin(x = y, size = kmat, munb = munb,
+ pstr0 = pstr0, log = TRUE))
+ }
+ }, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+ vfamily = c("zinegbinomial"),
+ deriv = eval(substitute(expression({
+ Musual <- 3
+ NOS = extra$NOS
+
+ pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
+ .lsize , earg = .esize )
+
+ dpstr0.deta = dtheta.deta(pstr0, .lpstr0 , earg = .epstr0 )
+ dmunb.deta = dtheta.deta(munb , .lmunb , earg = .emunb )
+ dsize.deta = dtheta.deta(kmat , .lsize , earg = .esize )
+ dthetas.detas =
+ (cbind(dpstr0.deta,
+ dmunb.deta,
+ dsize.deta))[, interleave.VGAM(Musual*NOS, M = Musual)]
+
+
+
+ dl.dpstr0 = -1 / (1 - pstr0)
+ dl.dmunb = y / munb - (y + kmat) / (munb + kmat)
+ dl.dsize = digamma(y + kmat) - digamma(kmat) -
+ (y + kmat) / (munb + kmat) + 1 +
+ log(kmat / (kmat + munb))
+
+
+
+ for(spp. in 1:NOS) {
+ index0 = (y[, spp.] == 0)
+ if (!any(index0) || !any(!index0))
+ stop("must have some 0s AND some positive counts in the data")
+
+ kmat. = kmat[index0, spp.]
+ munb. = munb[index0, spp.]
+ pstr0. = pstr0[index0, spp.]
+
+
+ tempk. = kmat. / (kmat. + munb.)
+ tempm. = munb. / (kmat. + munb.)
+ prob0. = tempk.^kmat.
+ df0.dmunb. = -tempk.* prob0.
+ df0.dkmat. = prob0. * (tempm. + log(tempk.))
+
+
+ denom. = pstr0. + (1 - pstr0.) * prob0.
+ dl.dpstr0[index0, spp.] = (1 - prob0.) / denom.
+ dl.dmunb[index0, spp.] = (1 - pstr0.) * df0.dmunb. / denom.
+ dl.dsize[index0, spp.] = (1 - pstr0.) * df0.dkmat. / denom.
+ } # of spp.
+
+
+ dl.dthetas =
+ cbind(dl.dpstr0,
+ dl.dmunb,
+ dl.dsize)[, interleave.VGAM(Musual*NOS, M = Musual)]
+
+
+ c(w) * dl.dthetas * dthetas.detas
+ }), list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
+ .epstr0 = epstr0, .emunb = emunb, .esize = esize ))),
+
+ weight = eval(substitute(expression({
+
+
+
+ wz = matrix(0, n, Musual*M - Musual)
+
+ ind3 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+
+ run.varcov = array(0.0, c(n, length(ind3$row.index), NOS))
+
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rzinegbin(n = n*NOS, pstr0 = pstr0,
+ size = kmat, mu = munb)
+ dim(ysim) = c(n, NOS)
+ index0 = (ysim[, spp.] == 0)
+
+ dl.dpstr0 = -1 / (1 - pstr0)
+ dl.dmunb = ysim / munb - (ysim + kmat) / (munb + kmat)
+ dl.dsize = digamma(ysim + kmat) - digamma(kmat) -
+ (ysim + kmat) / (munb + kmat) + 1 +
+ log(kmat / (kmat + munb))
+
+
+ for(spp. in 1:NOS) {
+ index0 = (ysim[, spp.] == 0)
+ if (!any(index0) || !any(!index0))
+ stop("must have some 0s AND some positive counts in the data")
+
+ kmat. = kmat[index0, spp.]
+ munb. = munb[index0, spp.]
+ pstr0. = pstr0[index0, spp.]
+
+
+ tempk. = kmat. / (kmat. + munb.)
+ tempm. = munb. / (kmat. + munb.)
+ prob0. = tempk.^kmat.
+ df0.dmunb. = -tempk.* prob0.
+ df0.dkmat. = prob0. * (tempm. + log(tempk.))
+
+
+ denom. = pstr0. + (1 - pstr0.) * prob0.
+ dl.dpstr0[index0, spp.] = (1 - prob0.) / denom.
+ dl.dmunb[index0, spp.] = (1 - pstr0.) * df0.dmunb. / denom.
+ dl.dsize[index0, spp.] = (1 - pstr0.) * df0.dkmat. / denom.
+
+
+ sdl.dthetas = cbind(dl.dpstr0[, spp.],
+ dl.dmunb[, spp.],
+ dl.dsize[, spp.])
+
+ temp3 = sdl.dthetas
+ run.varcov[,, spp.] = run.varcov[,, spp.] +
+ temp3[, ind3$row.index] *
+ temp3[, ind3$col.index]
+
+
+ } # End of for(spp.) loop
+ } # End of ii nsimEIM loop
+
+ run.varcov = run.varcov / .nsimEIM
+
+ wz1 = if (intercept.only) {
+ for(spp. in 1:NOS) {
+ for(jay in 1:length(ind3$row.index)) {
+ run.varcov[, jay, spp.] = mean(run.varcov[, jay, spp.])
+ }
+ }
+ run.varcov
+ } else {
+ run.varcov
+ }
+
+ for(spp. in 1:NOS) {
+ wz1[,, spp.] = wz1[,, spp.] *
+ dthetas.detas[, Musual * (spp. - 1) + ind3$row] *
+ dthetas.detas[, Musual * (spp. - 1) + ind3$col]
+ }
+
+ for(spp. in 1:NOS) {
+ for(jay in 1:Musual) {
+ for(kay in jay:Musual) {
+ cptr = iam((spp. - 1) * Musual + jay,
+ (spp. - 1) * Musual + kay, M = M)
+ temp.wz1 = wz1[,, spp.]
+ wz[, cptr] = temp.wz1[, iam(jay, kay, M = Musual)]
+ }
+ }
+ }
+ c(w) * wz
+ }), list( .lpstr0 = lpstr0,
+ .epstr0 = epstr0, .nsimEIM = nsimEIM ))))
+} # End of zinegbinomial
+
+
+
+
+
+
+
+
+ zipoissonff <- function(llambda = "loge", lprobp = "logit",
+ elambda = list(), eprobp = list(),
+ ilambda = NULL, iprobp = NULL, imethod = 1,
+ shrinkage.init = 0.8, zero = -2)
+{
+ lprobp. <- lprobp
+ eprobp. <- eprobp
+ iprobp. <- iprobp
+
+ if (mode(llambda) != "character" && mode(llambda) != "name")
+ llambda <- as.character(substitute(llambda))
+ if (mode(lprobp.) != "character" && mode(lprobp.) != "name")
+ lprobp. <- as.character(substitute(lprobp.))
+
+ if (length(ilambda))
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("'ilambda' values must be positive")
+ if (length(iprobp.))
+ if (!is.Numeric(iprobp., positive = TRUE) ||
+ any(iprobp. >= 1))
+ stop("'iprobp' values must be inside the interval (0,1)")
+
+ if (!is.list(elambda)) elambda <- list()
+ if (!is.list(eprobp.)) eprobp. <- list()
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ new("vglmff",
+ blurb = c("Zero-inflated Poisson\n\n",
+ "Links: ",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("probp", lprobp., earg = eprobp.), "\n",
+ "Mean: probp * lambda"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y <- as.matrix(y)
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+ if (any(round(y) != y))
+ stop("responses must be integer-valued")
+
+ mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("probp", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .llambda, earg = .elambda, tag = FALSE),
+ namesof(mynames2, .lprobp., earg = .eprobp., tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+ if (!length(etastart)) {
+
+ matL <- matrix(if (length( .ilambda )) .ilambda else 0,
+ n, ncoly, byrow = TRUE)
+ matP <- matrix(if (length( .iprobp. )) .iprobp. else 0,
+ n, ncoly, byrow = TRUE)
+
+ for (jay in 1:ncoly) {
+ yjay <- y[, jay]
+
+ Phi0.init <- 1 - 0.85 * sum(w[yjay > 0]) / sum(w)
+ Phi0.init[Phi0.init <= 0.02] = 0.02 # Last resort
+ Phi0.init[Phi0.init >= 0.98] = 0.98 # Last resort
+
+ if ( length(mustart)) {
+ mustart <- matrix(mustart, n, ncoly) # Make sure right size
+ Lambda.init <- mustart / (1 - Phi0.init)
+ } else if ( .imethod == 2) {
+ mymean <- weighted.mean(yjay[yjay > 0], w[yjay > 0]) + 1/16
+ Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * mymean
+ } else {
+ use.this <- median(yjay[yjay > 0]) + 1 / 16
+ Lambda.init <- (1 - .sinit) * (yjay + 1/8) + .sinit * use.this
+ }
+
+ zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
+ sum(w * dzipois(x = y, pstr0 = phival,
+ lambda = extraargs$lambda,
+ log = TRUE))
+ }
+ phi0.grid <- seq(0.02, 0.98, len = 21)
+ Phi0mat.init <- getMaxMin(phi0.grid,
+ objfun = zipois.Loglikfun,
+ y = y, x = x, w = w,
+ extraargs = list(lambda = Lambda.init))
+ if (length(mustart)) {
+ Lambda.init <- Lambda.init / (1 - Phi0mat.init)
+ }
+
+ if (!length( .ilambda ))
+ matL[, jay] <- Lambda.init
+ if (!length( .iprobp. ))
+ matP[, jay] <- Phi0mat.init
+ }
+
+ etastart <- cbind(theta2eta( matL, .llambda , earg = .elambda ),
+ theta2eta(1 - matP, .lprobp. , earg = .eprobp. ))[,
+ interleave.VGAM(M, M = Musual)]
+
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda,
+ .iprobp. = iprobp., .ilambda = ilambda,
+ .imethod = imethod, .sinit = shrinkage.init ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- 2
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
+ earg = .elambda )
+ probp. <- eta2theta(eta[, Musual*(1:ncoly) ], .lprobp.,
+ earg = .eprobp. )
+ probp. * lambda
+ }, list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .llambda, length = ncoly),
+ rep( .lprobp., length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+
+ misc$earg <- vector("list", Musual * ncoly)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .elambda
+ misc$earg[[Musual*ii ]] <- .eprobp.
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected = TRUE
+
+ misc$pobs0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
+ misc$pobs0 <- as.matrix(misc$pobs0)
+ if (length(dimnames(y)[[2]]) > 0)
+ dimnames(misc$pobs0) = dimnames(y)
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Musual <- 2
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
+ earg = .elambda )
+ probp. <- eta2theta(eta[, Musual*(1:ncoly) ], .lprobp.,
+ earg = .eprobp. )
+
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzipois(x = y, pstr0 = 1 - probp., lambda = lambda,
+ log = TRUE))
+ }
+ }, list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ vfamily = c("zipoissonff"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ ncoly <- extra$ncoly
+ lambda <- eta2theta(eta[, Musual*(1:ncoly) - 1], .llambda,
+ earg = .elambda )
+ probp. <- eta2theta(eta[, Musual*(1:ncoly) ], .lprobp.,
+ earg = .eprobp. )
+
+
+ dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda )
+ dprobp..deta <- dtheta.deta(probp., .lprobp., earg = .eprobp. )
+
+ denom <- 1 + probp. * expm1(-lambda)
+ ind0 <- (y == 0)
+ dl.dlambda <- -probp. * exp(-lambda) / denom
+ dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
+ dl.dprobp. <- expm1(-lambda) / denom
+ dl.dprobp.[!ind0] <- 1 / probp.[!ind0]
+
+ ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
+ dl.dprobp. * dprobp..deta)
+ ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
+
+
+ if ( .llambda == "loge" && is.empty.list( .elambda ) &&
+ any(lambda[!ind0] < .Machine$double.eps)) {
+ for(spp. in 1:ncoly) {
+ ans[!ind0[, spp.], Musual * spp.] =
+ w[!ind0[, spp.]] *
+ (y[!ind0[, spp.], spp.] - lambda[!ind0[, spp.], spp.])
+ }
+ }
+
+
+
+ ans
+ }), list( .lprobp. = lprobp., .llambda = llambda,
+ .eprobp. = eprobp., .elambda = elambda ))),
+ weight = eval(substitute(expression({
+
+
+ wz <- matrix(0, nrow = n, ncol = M + M-1)
+ d2l.dlambda2 <- ( probp.) / lambda -
+ probp. * (1 - probp.) * exp(-lambda) / denom
+ d2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * denom)
+ d2l.dphilambda <- +exp(-lambda) / denom
+
+
+ if (ncoly == 1) { # Make sure these are matrices
+ d2l.dlambda2 <- cbind(d2l.dlambda2)
+ d2l.dprobp.2 <- cbind(d2l.dprobp.2)
+ dlambda.deta <- cbind(dlambda.deta)
+ dprobp..deta <- cbind(dprobp..deta)
+ d2l.dphilambda <- cbind(d2l.dphilambda)
+ }
+
+ for (ii in 1:ncoly) {
+ wz[, iam(Musual*ii - 1, Musual*ii - 1, M)] <-
+ d2l.dlambda2[, ii] *
+ dlambda.deta[, ii]^2
+ wz[, iam(Musual*ii , Musual*ii , M)] <-
+ d2l.dprobp.2[, ii] *
+ dprobp..deta[, ii]^2
+ wz[, iam(Musual*ii - 1, Musual*ii , M)] <-
+ d2l.dphilambda[, ii] *
+ dprobp..deta[, ii] *
+ dlambda.deta[, ii]
+
+
+
+ } # ii
+
+
+ c(w) * wz
+ }), list( .llambda = llambda ))))
+}
+
+
+
+
+
+
+
+dzigeom = function(x, prob, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL = max(length(x), length(prob), length(pstr0))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+
+
+ ans = dgeom(x = x, prob = prob, log = TRUE)
+
+
+ ans = if (log.arg) {
+ ifelse(x == 0, log(pstr0 + (1 - pstr0) * exp(ans)),
+ log1p(-pstr0) + ans)
+ } else {
+ ifelse(x == 0, pstr0 + (1 - pstr0) * exp(ans) ,
+ (1 - pstr0) * exp(ans))
+ }
+
+
+
+ prob0 = prob
+ deflat_limit = -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+ ans
+}
+
+
+
+pzigeom = function(q, prob, pstr0 = 0) {
+
+
+ LLL = max(length(q), length(prob), length(pstr0))
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
+
+ ans = pgeom(q, prob)
+ ans = ifelse(q < 0, 0, pstr0 + (1-pstr0) * ans)
+
+
+ prob0 = prob
+ deflat_limit = -prob0 / (1 - prob0)
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+ ans
+}
+
+
+
+qzigeom = function(p, prob, pstr0 = 0) {
+ LLL = max(length(p), length(prob), length(pstr0))
+ ans = p = rep(p, len = LLL)
+ prob = rep(prob, len = LLL)
+ pstr0 = rep(pstr0, len = LLL)
+ ans[p <= pstr0] = 0
+ ind1 = (p > pstr0)
+ ans[ind1] =
+ qgeom((p[ind1] - pstr0[ind1]) / (1 - pstr0[ind1]),
+ prob = prob[ind1])
+
+
+ prob0 = prob
+ deflat_limit = -prob0 / (1 - prob0)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[p[ind0] <= pobs0] = 0
+ pindex = (1:LLL)[ind0 & (p > pobs0)]
+ Pobs0 = pstr0[pindex] + (1 - pstr0[pindex]) * prob0[pindex]
+ ans[pindex] = 1 + qgeom((p[pindex] - Pobs0) / (1 - Pobs0),
+ prob = prob[pindex])
+ }
+
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+ ans
+}
+
+
+
+rzigeom = function(n, prob, pstr0 = 0) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+
+ pstr0 = rep(pstr0, len = use.n)
+ prob = rep(prob, len = use.n)
+
+
+ ans = rgeom(use.n, prob)
+ ans[runif(use.n) < pstr0] = 0
+
+
+ prob0 = prob
+ deflat_limit = -prob0 / (1 - prob0)
+ ind0 = (deflat_limit <= pstr0) & (pstr0 < 0)
+ if (any(ind0)) {
+ pobs0 = pstr0[ind0] + (1 - pstr0[ind0]) * prob0[ind0]
+ ans[ind0] = 1 + rgeom(sum(ind0), prob = prob[ind0])
+ ans[ind0] = ifelse(runif(sum(ind0)) < pobs0, 0, ans[ind0])
+ }
+
+ ans[pstr0 < deflat_limit] = NaN
+ ans[pstr0 > 1] = NaN
+
+
+ ans
+}
+
+
+
+
+
+ zigeometric = function(lprob = "logit", eprob = list(),
+ lpstr0 = "logit", epstr0 = list(),
+ iprob = NULL, ipstr0 = NULL,
+ imethod = 1,
+ bias.red = 0.5,
+ zero = 2)
+{
+
+
+ expected = TRUE
+
+ if (mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+ if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
+ lpstr0 = as.character(substitute(lpstr0))
+
+ if (!is.list(eprob)) eprob = list()
+ if (!is.list(epstr0)) epstr0 = list()
+
+
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ iprob >= 1)
+ stop("argument 'iprob' is out of range")
+ if (length(ipstr0))
+ if (!is.Numeric(ipstr0, positive = TRUE) ||
+ ipstr0 >= 1)
+ stop("argument 'ipstr0' is out of range")
+
+ if (!is.Numeric(bias.red, allowable.length = 1, positive = TRUE) ||
+ bias.red > 1)
+ stop("argument 'bias.red' must be between 0 and 1")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ new("vglmff",
+ blurb = c("Zero-inflated geometric distribution,\n",
+ "P[Y = 0] = pstr0 + (1 - pstr0) * prob,\n",
+ "P[Y = y] = (1 - pstr0) * prob * (1 - prob)^y, ",
+ "y = 1, 2, ...\n\n",
+ "Link: ",
+ namesof("prob", lprob, earg = eprob ), ", ",
+ namesof("pstr0", lpstr0, earg = epstr0), "\n",
+ "Mean: (1 - pstr0) * (1 - prob) / prob"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a 1-column matrix")
+
+ if (any(y < 0))
+ stop("all responses must be >= 0")
+ if (any(y != round(y)))
+ stop("response should be integer-valued")
+
+ predictors.names =
+ c(namesof("prob", .lprob, earg = .earg, tag = FALSE),
+ namesof("pstr0", .lpstr0, earg = .epstr0, tag = FALSE))
+
+ if (!length(etastart)) {
+ prob.init = if ( .imethod == 3)
+ .bias.red / (1 + y + 1/8) else
+ if ( .imethod == 2)
+ .bias.red / (1 + mean(y) + 1/8) else
+ .bias.red / (1 + weighted.mean(y, w) + 1/8)
+ prob.init = if (length( .iprob )) {
+ rep( .iprob, len = n)
+ } else {
+ rep(prob.init, len = n)
+ }
+
+
+ prob0.est = sum(w[y == 0]) / sum(w)
+ psze.init = if ( .imethod == 3)
+ prob0.est / 2 else
+ if ( .imethod == 1)
+ max(0.05, (prob0.est - median(prob.init))) else
+ prob0.est / 5
+ psze.init = if (length( .ipstr0 )) {
+ rep( .ipstr0 , len = n)
+ } else {
+ rep( psze.init, len = n)
+ }
+
+
+
+ etastart =
+ cbind(theta2eta(prob.init, .lprob, earg = .eprob),
+ theta2eta(psze.init, .lpstr0, earg = .epstr0))
+
+ }
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
+ .iprob = iprob, .ipstr0 = ipstr0,
+ .bias.red = bias.red,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+ (1 - pstr0) * (1 - prob) / prob
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ last = eval(substitute(expression({
+ misc$link = c(prob = .lprob, pstr0 = .lpstr0 )
+ misc$earg = list(prob = .eprob, pstr0 = .epstr0 )
+ misc$imethod = .imethod
+ misc$zero = .zero
+ misc$bias.red = .bias.red
+ misc$expected = .expected
+ misc$ipstr0 = .ipstr0
+
+
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0,
+ .ipstr0 = ipstr0,
+ .zero = zero,
+ .expected = expected,
+ .bias.red = bias.red,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
+ }
+ }, list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ vfamily = c("zigeometric"),
+
+ deriv = eval(substitute(expression({
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
+
+
+ prob0 = prob # P(Y == 0)
+ tmp8 = pstr0 + (1 - pstr0) * prob0
+ index0 = (y == 0)
+
+ dl.dpstr0 = (1 - prob0) / tmp8
+ dl.dpstr0[!index0] = -1 / (1 - pstr0[!index0])
+
+ dl.dprob = (1 - pstr0) / tmp8
+ dl.dprob[!index0] = 1 / prob[!index0] -
+ y[!index0] / (1 - prob[!index0])
+
+ dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob )
+ dpstr0.deta = dtheta.deta(pstr0 , .lpstr0 , earg = .epstr0 )
+
+ dl.deta12 =
+ c(w) * cbind(dl.dprob * dprob.deta,
+ dl.dpstr0 * dpstr0.deta)
+ dl.deta12
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .eprob = eprob, .epstr0 = epstr0 ))),
+ weight = eval(substitute(expression({
+ ed2l.dprob2 = (1 - pstr0) * (1 / (prob^2 * (1 - prob)) +
+ (1 - pstr0) / tmp8)
+ ed2l.dpstr0.prob = 1 / tmp8
+ ed2l.dpstr02 = (1 - prob0) / ((1 - pstr0) * tmp8)
+
+ od2l.dprob2 = ((1 - pstr0) / tmp8)^2
+ od2l.dprob2[!index0] = 1 / (prob[!index0])^2 +
+ y[!index0] / (1 - prob[!index0])^2
+ od2l.dpstr0.prob = (tmp8 + (1 - prob0) * (1 - pstr0)) / tmp8^2
+ od2l.dpstr0.prob[!index0] = 0
+
+
+ od2l.dpstr02 = ((1 - prob0) / tmp8)^2
+ od2l.dpstr02[!index0] = 1 / (1 - pstr0[!index0])^2
+
+
+ wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ if ( .expected ) {
+ wz[,iam(1,1,M)] = ed2l.dprob2 * dprob.deta^2
+ wz[,iam(2,2,M)] = ed2l.dpstr02 * dpstr0.deta^2
+ wz[,iam(1,2,M)] = ed2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ } else {
+ wz[,iam(1,1,M)] = od2l.dprob2 * dprob.deta^2
+ wz[,iam(2,2,M)] = od2l.dpstr02 * dpstr0.deta^2
+ wz[,iam(1,2,M)] = od2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ }
+
+
+ c(w) * wz
+ }), list( .lprob = lprob, .lpstr0 = lpstr0,
+ .expected = expected,
+ .eprob = eprob, .epstr0 = epstr0 ))))
+}
+
+
+
+
+
+
+dzageom = function(x, prob, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log))
+ stop("bad input for argument 'log'")
+ rm(log)
- if (!length( .ilambda ))
- mat1[, jay] <- Lambda.init
- if (!length( .iprobp. ))
- mat2[, jay] <- Phimat.init
- }
+ LLL = max(length(x), length(prob), length(pobs0))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ index0 = (x == 0)
- etastart <- cbind(theta2eta( mat1, .llambda, .elambda ),
- theta2eta(1 - mat2, .lprobp., .eprobp. ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ if (log.arg) {
+ ans[ index0] = log(pobs0[index0])
+ ans[!index0] = log1p(-pobs0[!index0]) +
+ dposgeom(x[!index0],
+ prob = prob[!index0], log = TRUE)
+ } else {
+ ans[ index0] = pobs0[index0]
+ ans[!index0] = (1-pobs0[!index0]) *
+ dposgeom(x[!index0],
+ prob = prob[!index0])
+ }
+ ans
+}
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .lprobp. = lprobp., .llambda = llambda,
- .eprobp. = eprobp., .elambda = elambda,
- .iprobp. = iprobp., .ilambda = ilambda,
- .imethod = imethod, .sinit = shrinkage.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
- probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
- probp. * lambda
- }, list( .lprobp. = lprobp., .llambda = llambda,
- .eprobp. = eprobp., .elambda = elambda ))),
- last = eval(substitute(expression({
- Musual <- extra$Musual
- misc$link <- c(rep( .llambda, length = ncoly),
- rep( .lprobp., length = ncoly))
- misc$link <- misc$link[interleave.VGAM(Musual * ncoly, M = Musual)]
- temp.names <- c(mynames1, mynames2)
- temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
- names(misc$link) <- temp.names
- misc$earg <- vector("list", Musual * ncoly)
- names(misc$earg) <- temp.names
- for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .elambda
- misc$earg[[Musual*ii ]] <- .eprobp.
- }
+pzageom = function(q, prob, pobs0 = 0) {
- misc$Musual <- Musual
- misc$imethod <- .imethod
+ LLL = max(length(q), length(prob), length(pobs0))
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
- misc$prob0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
- }), list( .lprobp. = lprobp., .llambda = llambda,
- .eprobp. = eprobp., .elambda = elambda,
- .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- smallno <- 100 * .Machine$double.eps
- ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
- probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
+ ans[q > 0] = pobs0[q > 0] +
+ (1 - pobs0[q > 0]) *
+ pposgeom(q[q > 0], prob = prob[q > 0])
+ ans[q < 0] = 0
+ ans[q == 0] = pobs0[q == 0]
+ ans
+}
- probp. <- pmax(probp., smallno)
- probp. <- pmin(probp., 1.0 - smallno)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzipois(x = y, phi = 1 - probp., lambda = lambda, log = TRUE))
- }
- }, list( .lprobp. = lprobp., .llambda = llambda,
- .eprobp. = eprobp., .elambda = elambda ))),
- vfamily = c("zipoissonff"),
- deriv = eval(substitute(expression({
- Musual <- extra$Musual
- ncoly <- extra$ncoly
- lambda <- eta2theta(eta[, 2*(1:ncoly) - 1], .llambda, earg = .elambda )
- probp. <- eta2theta(eta[, 2*(1:ncoly) ], .lprobp., earg = .eprobp. )
+qzageom = function(p, prob, pobs0 = 0) {
- smallno <- 100 * .Machine$double.eps
- probp. <- pmax(probp., smallno)
- probp. <- pmin(probp., 1.0 - smallno)
+ LLL = max(length(p), length(prob), length(pobs0))
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- dlambda.deta <- dtheta.deta(lambda, .llambda, earg = .elambda )
- dprobp..deta <- dtheta.deta(probp., .lprobp., earg = .eprobp. )
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
- tmp8 <- 1 + probp. * expm1(-lambda)
- ind0 <- (y == 0)
- dl.dlambda <- -probp. * exp(-lambda) / tmp8
- dl.dlambda[!ind0] <- (y[!ind0] - lambda[!ind0]) / lambda[!ind0]
- dl.dprobp. <- expm1(-lambda) / tmp8
- dl.dprobp.[!ind0] <- 1 / probp.[!ind0]
+ ans = p
+ ind4 = (p > pobs0)
+ ans[!ind4] = 0.0
+ ans[ ind4] = qposgeom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ prob = prob[ind4])
+ ans
+}
- ans <- c(w) * cbind(dl.dlambda * dlambda.deta,
- dl.dprobp. * dprobp..deta)
- if (FALSE && .llambda == "loge" &&
- (any(lambda[!ind0] < .Machine$double.eps))) {
- ans[!ind0, 2] <- w[!ind0] * (y[!ind0] - lambda[!ind0])
- }
- ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
- ans
- }), list( .lprobp. = lprobp., .llambda = llambda,
- .eprobp. = eprobp., .elambda = elambda ))),
- weight = eval(substitute(expression({
+rzageom = function(n, prob, pobs0 = 0) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
- wz <- matrix(0, nrow = n, ncol = M + M-1)
- d2l.dlambda2 <- ( probp.) / lambda -
- probp. * (1 - probp.) * exp(-lambda) / tmp8
- d2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * tmp8)
- d2l.dphilambda <- -exp(-lambda) / tmp8
+ ans = rposgeom(use.n, prob)
+ if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ifelse(runif(use.n) < pobs0, 0, ans)
+}
- if (ncoly == 1) { # Make sure these are matrices
- d2l.dlambda2 <- cbind(d2l.dlambda2)
- d2l.dprobp.2 <- cbind(d2l.dprobp.2)
- dlambda.deta <- cbind(dlambda.deta)
- dprobp..deta <- cbind(dprobp..deta)
- d2l.dphilambda <- cbind(d2l.dphilambda)
- }
- for (ii in 1:ncoly) {
- wz[, iam(2*ii - 1, 2*ii - 1, M)] <- d2l.dlambda2[, ii] *
- dlambda.deta[, ii]^2
- wz[, iam(2*ii , 2*ii , M)] <- d2l.dprobp.2[, ii] *
- dprobp..deta[, ii]^2
- wz[, iam(2*ii - 1, 2*ii , M)] <- d2l.dphilambda[, ii] *
- dprobp..deta[, ii] *
- dlambda.deta[, ii]
- if (FALSE && .llambda == "loge" &&
- (any(lambda[!ind0] < .Machine$double.eps))) {
- ind5 <- !ind0 & (lambda < .Machine$double.eps)
- if (any(ind5))
- wz[ind5,iam(1, 1, M)] <- (1 - probp.[ind5]) * .Machine$double.eps
- }
- }
- c(w) * wz
- }), list( .llambda = llambda ))))
-}
@@ -1849,281 +2873,556 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
-dzigeom = function(x, prob, pszero = 0, log = FALSE) {
+dzabinom = function(x, size, prob, pobs0 = 0, log = FALSE) {
if (!is.logical(log.arg <- log))
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(prob), length(pszero))
- if (length(x) != LLL) x = rep(x, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL);
- if (length(pszero) != LLL) pszero = rep(pszero, len = LLL);
-
-
- ans = dgeom(x = x, prob = prob, log = TRUE)
-
- ans[pszero < 0] = NaN
- ans[pszero > 1] = NaN
+ LLL = max(length(x), length(size), length(prob), length(pobs0))
+ if (length(x) != LLL) x = rep(x, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+ index0 = (x == 0)
if (log.arg) {
- ifelse(x == 0, log(pszero + (1-pszero) * exp(ans)),
- log1p(-pszero) + ans)
+ ans[ index0] = log(pobs0[index0])
+ ans[!index0] = log1p(-pobs0[!index0]) +
+ dposbinom(x[!index0], size = size[!index0],
+ prob = prob[!index0], log = TRUE)
} else {
- ifelse(x == 0, pszero + (1-pszero) * exp(ans) ,
- (1-pszero) * exp(ans))
+ ans[ index0] = pobs0[index0]
+ ans[!index0] = (1-pobs0[!index0]) *
+ dposbinom(x[!index0], size = size[!index0],
+ prob = prob[!index0])
}
+ ans
}
-pzigeom = function(q, prob, pszero = 0) {
- answer = pgeom(q, prob)
- LLL = max(length(pszero), length(answer))
- if (length(pszero) != LLL) pszero = rep(pszero, len = LLL);
- if (length(answer) != LLL) answer = rep(answer, len = LLL);
+pzabinom = function(q, size, prob, pobs0 = 0) {
+
+ LLL = max(length(q), length(size), length(prob), length(pobs0))
+ if (length(q) != LLL) q = rep(q, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ ans = rep(0.0, len = LLL)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
- answer = ifelse(q < 0, 0, pszero + (1-pszero) * answer)
- answer[pszero < 0] = NaN
- answer[pszero > 1] = NaN
- answer
+ ans[q > 0] = pobs0[q > 0] +
+ (1 - pobs0[q > 0]) *
+ pposbinom(q[q > 0], size = size[q > 0], prob = prob[q > 0])
+ ans[q < 0] = 0
+ ans[q == 0] = pobs0[q == 0]
+ ans
}
+qzabinom = function(p, size, prob, pobs0 = 0) {
-qzigeom = function(p, prob, pszero = 0) {
- LLL = max(length(p), length(prob), length(pszero))
- answer = p = rep(p, len = LLL)
- prob = rep(prob, len = LLL)
- pszero = rep(pszero, len = LLL)
- answer[p <= pszero] = 0
- ind1 = (p > pszero)
- answer[ind1] =
- qgeom((p[ind1] - pszero[ind1]) / (1 - pszero[ind1]),
- prob = prob[ind1])
- answer[pszero < 0] = NaN
- answer[pszero > 1] = NaN
- answer
-}
+ LLL = max(length(p), length(size), length(prob), length(pobs0))
+ if (length(p) != LLL) p = rep(p, len = LLL);
+ if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(prob) != LLL) prob = rep(prob, len = LLL);
+ if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be in [0,1]")
+
+ ans = p
+ ind4 = (p > pobs0)
+ ans[!ind4] = 0.0
+ ans[ ind4] = qposbinom((p[ind4] - pobs0[ind4]) / (1 - pobs0[ind4]),
+ size = size[ind4],
+ prob = prob[ind4])
+ ans
+}
-rzigeom = function(n, prob, pszero = 0) {
+rzabinom = function(n, size, prob, pobs0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for argument 'n'") else n
- ans = rgeom(use.n, prob)
- pszero = rep(pszero, len = length(ans))
- if (!is.Numeric(pszero) || any(pszero < 0) || any(pszero > 1))
- stop("argument 'pszero' must be between 0 and 1 inclusive")
-
- ans[runif(use.n) < pszero] = 0
- ans
+ ans = rposbinom(use.n, size, prob)
+ if (length(pobs0) != use.n) pobs0 = rep(pobs0, len = use.n)
+ if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
+ stop("argument 'pobs0' must be between 0 and 1 inclusive")
+ ifelse(runif(use.n) < pobs0, 0, ans)
}
- zigeometric = function(lprob = "logit", eprob = list(),
- lpszero = "logit", epszero = list(),
- iprob = NULL, ipszero = NULL,
- imethod = 1,
- bias.red = 0.5,
- zero = 2)
+ zabinomial = function(lprob = "logit", eprob = list(),
+ lpobs0 = "logit", epobs0 = list(),
+ iprob = NULL, ipobs0 = NULL,
+ imethod = 1,
+ zero = 2)
{
- expected = TRUE
+
if (mode(lprob) != "character" && mode(lprob) != "name")
lprob = as.character(substitute(lprob))
- if (mode(lpszero) != "character" && mode(lpszero) != "name")
- lpszero = as.character(substitute(lpszero))
-
- if (!is.list(eprob)) eprob = list()
- if (!is.list(epszero)) epszero = list()
+ if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
+ lpobs0 = as.character(substitute(lpobs0))
+ if (!is.list(eprob)) eprob = list()
+ if (!is.list(epobs0)) epobs0 = list()
if (length(iprob))
- if (!is.Numeric(iprob, posit = TRUE) ||
+ if (!is.Numeric(iprob, positive = TRUE) ||
iprob >= 1)
stop("argument 'iprob' is out of range")
- if (length(ipszero))
- if (!is.Numeric(ipszero, posit = TRUE) ||
- ipszero >= 1)
- stop("argument 'ipszero' is out of range")
+ if (length(ipobs0))
+ if (!is.Numeric(ipobs0, positive = TRUE) ||
+ ipobs0 >= 1)
+ stop("argument 'ipobs0' is out of range")
- if (!is.Numeric(bias.red, allow = 1, posit = TRUE) ||
- bias.red > 1)
- stop("argument 'bias.red' must be between 0 and 1")
- if (!is.Numeric(imethod, allow = 1, integ = TRUE, posit = TRUE) ||
- imethod > 3)
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
new("vglmff",
- blurb = c("Zero-inflated geometric distribution,\n",
- "P[Y = 0] = pszero + (1 - pszero) * prob,\n",
- "P[Y = y] = (1 - pszero) * prob * (1 - prob)^y, ",
- "y = 1, 2, ...\n\n",
+ blurb = c("Zero-altered binomial distribution ",
+ "(Bernoulli and positive-binomial conditional model)\n\n",
+ "P[Y = 0] = pobs0,\n",
+ "P[Y = y] = (1 - pobs0) * dposbinom(x = y, size, prob), ",
+ "y = 1, 2, ..., size,\n\n",
"Link: ",
- namesof("prob", lprob, earg = eprob), ", ",
- namesof("pszero", lpszero, earg = epszero), "\n",
- "Mean: (1 - pszero) * (1 - prob) / prob"),
+ namesof("prob" , lprob, earg = eprob), ", ",
+ namesof("pobs0", lpobs0, earg = epobs0), "\n",
+ "Mean: (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a 1-column matrix")
+ if (!all(w == 1))
+ extra$orig.w = w
+
+
+
+ {
+ NCOL = function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+
+ if (NCOL(y) == 1) {
+ if (is.factor(y)) y <- y != levels(y)[1]
+ nn = rep(1, n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1.0 + w)
+
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y = round(y)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w = w * nvec
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + nvec * y) / (1 + nvec)
+ } else {
+ stop("for the binomialff family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
+ "or a factor ",
+ "(first level = fail, other levels = success),\n",
+ "or a 2-column matrix where col 1 is the no. of ",
+ "successes and col 2 is the no. of failures")
+ }
+
+ }
+ if (!all(w == 1))
+ extra$new.w = w
+
+
+ y = as.matrix(y)
+ extra$y0 = y0 = ifelse(y == 0, 1, 0)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+
- if (any(y < 0))
- stop("all responses must be >= 0")
- if (any(y != round(y)))
- stop("response should be integer-valued")
predictors.names =
- c(namesof("prob", .lprob, earg = .earg, tag = FALSE),
- namesof("pszero", .lpszero, earg = .epszero, tag = FALSE))
+ c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
+ namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE))
- if (!length(etastart)) {
- prob.init = if ( .imethod == 3)
- .bias.red / (1 + y + 1/8) else
- if ( .imethod == 2)
- .bias.red / (1 + mean(y) + 1/8) else
- .bias.red / (1 + weighted.mean(y, w) + 1/8)
- prob.init = if(length( .iprob )) {
- rep( .iprob, len = n)
- } else {
- rep(prob.init, len = n)
- }
+ orig.w = if (length(extra$orig.w)) extra$orig.w else 1
+ new.w = if (length(extra$new.w)) extra$new.w else 1
+ Size = new.w / orig.w
- prob0.est = sum(w[y == 0]) / sum(w)
- psze.init = if ( .imethod == 3)
- prob0.est / 2 else
- if ( .imethod == 1)
- max(0.05, (prob0.est - median(prob.init))) else
- prob0.est / 5
- psze.init = if(length( .ipszero )) {
- rep( .ipszero, len = n)
- } else {
- rep( psze.init, len = n)
- }
+ phi.init = if (length( .ipobs0 )) .ipobs0 else {
+ prob0.est = sum(Size[y == 0]) / sum(Size)
+ if ( .imethod == 1) {
+ (prob0.est - (1 - mustart)^Size) / (1 - (1 - mustart)^Size)
+ } else
+ if ( .imethod == 2) {
+ prob0.est
+ } else {
+ prob0.est * 0.5
+ }
+ }
+
+ phi.init[phi.init <= -0.10] = 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] = 0.05 # Last resort
+ phi.init[phi.init >= 0.99] = 0.95 # Last resort
+
+ if (!length(etastart)) {
etastart =
- cbind(theta2eta(prob.init, .lprob, earg = .eprob),
- theta2eta(psze.init, .lpszero, earg = .epszero))
+ cbind(theta2eta( mustart, .lprob, earg = .eprob ),
+ theta2eta(phi.init, .lpobs0, earg = .epobs0 ))
+ mustart <- NULL
}
- }), list( .lprob = lprob, .lpszero = lpszero,
- .eprob = eprob, .epszero = epszero,
- .iprob = iprob, .ipszero = ipszero,
- .bias.red = bias.red,
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0,
+ .iprob = iprob, .ipobs0 = ipobs0,
.imethod = imethod ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pszero = eta2theta(eta[, 2], .lpszero , earg = .epszero )
- (1 - pszero) * (1 - prob) / prob
- }, list( .lprob = lprob, .lpszero = lpszero,
- .eprob = eprob, .epszero = epszero ))),
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob )
+ phi0 = eta2theta(eta[, 2], .lpobs0, earg = .epobs0 )
+ orig.w = if (length(extra$orig.w)) extra$orig.w else 1
+ new.w = if (length(extra$new.w)) extra$new.w else 1
+ Size = new.w / orig.w
+ (1 - phi0) * prob / (1 - (1 - prob)^Size)
+ }, list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
+
last = eval(substitute(expression({
- misc$link = c(prob = .lprob, pszero = .lpszero )
- misc$earg = list(prob = .eprob, pszero = .epszero )
+ misc$link = c(prob = .lprob, pobs0 = .lpobs0 )
+ misc$earg = list(prob = .eprob, pobs0 = .epobs0 )
misc$imethod = .imethod
misc$zero = .zero
- misc$bias.red = .bias.red
- misc$expected = .expected
-
-
- }), list( .lprob = lprob, .lpszero = lpszero,
- .eprob = eprob, .epszero = epszero,
+ misc$expected = TRUE
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0,
.zero = zero,
- .expected = expected,
- .bias.red = bias.red,
.imethod = imethod ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pszero = eta2theta(eta[, 2], .lpszero , earg = .epszero )
+ orig.w = if (length(extra$orig.w)) extra$orig.w else 1
+ new.w = if (length(extra$new.w)) extra$new.w else 1
+ Size = new.w / orig.w
+ prob = eta2theta(eta[, 1], .lprob , earg = .eprob )
+ pobs0 = eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzigeom(x = y, prob = prob, pszero = pszero, log = TRUE))
+ sum(orig.w * dzabinom(x = round(y * Size), size = Size,
+ prob = prob, pobs0 = pobs0,
+ log = TRUE))
}
- }, list( .lprob = lprob, .lpszero = lpszero,
- .eprob = eprob, .epszero = epszero ))),
- vfamily = c("zigeometric"),
+ }, list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
+ vfamily = c("zabinomial"),
deriv = eval(substitute(expression({
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
- pszero = eta2theta(eta[, 2], .lpszero , earg = .epszero )
+ NOS = if (length(extra$NOS)) extra$NOS else 1
+ Musual = 2
+ orig.w = if (length(extra$orig.w)) extra$orig.w else 1
+ new.w = if (length(extra$new.w)) extra$new.w else 1
+ Size = new.w / orig.w
- prob0 = prob # P(Y == 0)
- tmp8 = pszero + (1 - pszero) * prob0
- index0 = (y == 0)
+ prob = eta2theta(eta[, 1], .lprob , earg = .eprob )
+ phi0 = eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
- dl.dpszero = (1 - prob0) / tmp8
- dl.dpszero[!index0] = -1 / (1 - pszero[!index0])
+ dprob.deta = dtheta.deta(prob, .lprob , earg = .eprob )
+ dphi0.deta = dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
- dl.dprob = (1 - pszero) / tmp8
- dl.dprob[!index0] = 1 / prob[!index0] -
- y[!index0] / (1 - prob[!index0])
+ df0.dprob = -Size * (1 - prob)^(Size - 1)
+ df02.dprob2 = Size * (Size - 1) * (1 - prob)^(Size - 2)
+ prob0 = (1 - prob)^(Size)
+ oneminusf0 = 1 - prob0
+
+
+ dl.dprob = c(w) * (y / prob - (1 - y) / (1 - prob)) +
+ c(orig.w) * df0.dprob / oneminusf0
+ dl.dphi0 = -1 / (1 - phi0)
+
+
+ dl.dphi0[y == 0] = 1 / phi0[y == 0] # Do it in one line
+ skip = extra$skip.these
+ for(spp. in 1:NOS) {
+ dl.dprob[skip[, spp.], spp.] = 0
+ }
+
+
+ ans <- cbind( dl.dprob * dprob.deta,
+ c(orig.w) * dl.dphi0 * dphi0.deta)
+
+ ans
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))),
- dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob )
- dpszero.deta = dtheta.deta(pszero , .lpszero , earg = .epszero )
- myderiv =
- c(w) * cbind(dl.dprob * dprob.deta,
- dl.dpszero * dpszero.deta)
- myderiv
- }), list( .lprob = lprob, .lpszero = lpszero,
- .eprob = eprob, .epszero = epszero ))),
weight = eval(substitute(expression({
- ed2l.dprob2 = (1 - pszero) * (1 / (prob^2 * (1 - prob)) +
- (1 - pszero) / tmp8)
- ed2l.dpszero.prob = 1 / tmp8
- ed2l.dpszero2 = (1 - prob0) / ((1 - pszero) * tmp8)
+ wz = matrix(0.0, n, Musual)
- od2l.dprob2 = ((1 - pszero) / tmp8)^2
- od2l.dprob2[!index0] = 1 / (prob[!index0])^2 +
- y[!index0] / (1 - prob[!index0])^2
- od2l.dpszero.prob = (tmp8 + (1 - prob0) * (1 - pszero)) / tmp8^2
- od2l.dpszero.prob[!index0] = 0
+ usualmeanY = prob
+ meanY = (1 - phi0) * usualmeanY / oneminusf0
- od2l.dpszero2 = ((1 - prob0) / tmp8)^2
- od2l.dpszero2[!index0] = 1 / (1 - pszero[!index0])^2
+ term1 = c(Size) * (meanY / prob^2 -
+ meanY / (1 - prob)^2) +
+ c(Size) * (1 - phi0) / (1 - prob)^2
+ term2 = -(1 - phi0) * df02.dprob2 / oneminusf0
+ term3 = -(1 - phi0) * (df0.dprob / oneminusf0)^2
+ ed2l.dprob2 = term1 + term2 + term3
+ wz[, iam(1,1,M)] = ed2l.dprob2 * dprob.deta^2
- wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
- if ( .expected ) {
- wz[,iam(1,1,M)] = ed2l.dprob2 * dprob.deta^2
- wz[,iam(2,2,M)] = ed2l.dpszero2 * dpszero.deta^2
- wz[,iam(1,2,M)] = ed2l.dpszero.prob * dprob.deta * dpszero.deta
+
+ mu.phi0 = phi0
+ tmp100 = mu.phi0 * (1.0 - mu.phi0)
+ tmp200 = if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ tmp100
} else {
- wz[,iam(1,1,M)] = od2l.dprob2 * dprob.deta^2
- wz[,iam(2,2,M)] = od2l.dpszero2 * dpszero.deta^2
- wz[,iam(1,2,M)] = od2l.dpszero.prob * dprob.deta * dpszero.deta
+ (dphi0.deta^2) / tmp100
}
+ wz[, iam(2,2,M)] = tmp200
+ c(orig.w) * wz
+ }), list( .lprob = lprob, .lpobs0 = lpobs0,
+ .eprob = eprob, .epobs0 = epobs0 ))))
+}
- c(w) * wz
- }), list( .lprob = lprob, .lpszero = lpszero,
- .expected = expected,
- .eprob = eprob, .epszero = epszero ))))
-}
+ zageometric = function(lpobs0 = "logit", lprob = "logit",
+ epobs0 = list(), eprob = list(),
+ imethod = 1,
+ ipobs0 = NULL, iprob = NULL,
+ zero = NULL) {
+
+
+ if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
+ lpobs0 = as.character(substitute(lpobs0))
+ if (mode(lprob) != "character" && mode(lprob) != "name")
+ lprob = as.character(substitute(lprob))
+
+ if (!is.list(epobs0)) epobs0 = list()
+ if (!is.list(eprob)) eprob = list()
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(iprob))
+ if (!is.Numeric(iprob, positive = TRUE) ||
+ max(iprob) >= 1)
+ stop("argument 'iprob' out of range")
+ if (length(ipobs0))
+ if (!is.Numeric(ipobs0, positive = TRUE) ||
+ max(ipobs0) >= 1)
+ stop("argument 'ipobs0' out of range")
+
+
+ new("vglmff",
+ blurb = c("Zero-altered geometric ",
+ "(Bernoulli and positive-geometric conditional model)\n\n",
+ "Links: ",
+ namesof("pobs0", lpobs0, earg = epobs0, tag = FALSE), ", ",
+ namesof("prob" , lprob , earg = eprob , tag = FALSE), "\n",
+ "Mean: (1 - pobs0) / prob"),
+
+ constraints = eval(substitute(expression({
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ Musual <- 2
+ y <- as.matrix(y)
+ if (any(y != round(y )))
+ stop("the response must be integer-valued")
+ if (any(y < 0))
+ stop("the response must not have negative values")
+
+ extra$y0 = y0 = ifelse(y == 0, 1, 0)
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+
+ mynames1 = if (ncoly == 1) "pobs0" else paste("pobs0", 1:ncoly, sep = "")
+ mynames2 = if (ncoly == 1) "prob" else paste("prob", 1:ncoly, sep = "")
+ predictors.names =
+ c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
+ namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
+ interleave.VGAM(Musual*NOS, M = Musual)]
+
+ if (!length(etastart)) {
+
+ foo = function(x) mean(as.numeric(x == 0))
+ phi0.init = matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
+ if (length( .ipobs0 ))
+ phi0.init = matrix( .ipobs0 , n, ncoly, byrow = TRUE)
+
+
+ prob.init =
+ if ( .imethod == 2)
+ 1 / (1 + y + 1/16) else
+ if ( .imethod == 1)
+ (1 - phi0.init) / (1 + matrix(apply(y, 2, weighted.mean, w = w),
+ n, ncoly, byrow = TRUE) + 1/16) else
+ (1 - phi0.init) / (1 + matrix(apply(y, 2, median),
+ n, ncoly, byrow = TRUE) + 1/16)
+ if (length( .iprob ))
+ prob.init = matrix( .iprob , n, ncoly, byrow = TRUE)
+
+
+
+ etastart = cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
+ theta2eta(prob.init, .lprob , earg = .eprob ))
+ etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ }
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob,
+ .ipobs0 = ipobs0, .iprob = iprob,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ NOS = extra$NOS
+ Musual <- 2
+
+ phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+
+ (1 - phi0) / prob
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+ last = eval(substitute(expression({
+ temp.names = c(rep( .lpobs0 , len = NOS),
+ rep( .lprob , len = NOS))
+ temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$link = temp.names
+ misc$expected = TRUE
+ misc$earg = vector("list", Musual * NOS)
+ misc$imethod = .imethod
+ misc$ipobs0 = .ipobs0
+ misc$iprob = .iprob
+
+ names(misc$link) <-
+ names(misc$earg) <-
+ c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] = .epobs0
+ misc$earg[[Musual*ii ]] = .eprob
+ }
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob,
+ .ipobs0 = ipobs0, .iprob = iprob,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ NOS = extra$NOS
+ Musual <- 2
+
+ phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(w * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE))
+ }
+ }, list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+ vfamily = c("zageometric"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ NOS = extra$NOS
+ y0 = extra$y0
+ skip = extra$skip.these
+
+ phi0 = cbind(eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lpobs0 , earg = .epobs0 ))
+ prob = cbind(eta2theta(eta[, Musual*(1:NOS)-0, drop = FALSE],
+ .lprob , earg = .eprob ))
+
+
+ dl.dprob = 1 / prob - (y - 1) / (1 - prob)
+ dl.dphi0 = -1 / (1 - phi0)
+
+
+ for(spp. in 1:NOS) {
+ dl.dphi0[skip[, spp.], spp.] = 1 / phi0[skip[, spp.], spp.]
+ dl.dprob[skip[, spp.], spp.] = 0
+ }
+ dphi0.deta = dtheta.deta(phi0, .lpobs0 , earg = .epobs0 )
+ dprob.deta = dtheta.deta(prob, .lprob , earg = .eprob )
+
+
+ ans <- c(w) * cbind(dl.dphi0 * dphi0.deta,
+ dl.dprob * dprob.deta)
+ ans = ans[, interleave.VGAM(ncol(ans), M = Musual)]
+ ans
+ }), list( .lpobs0 = lpobs0, .lprob = lprob,
+ .epobs0 = epobs0, .eprob = eprob ))),
+ weight = eval(substitute(expression({
+
+ wz = matrix(0.0, n, Musual*NOS)
+
+
+ ed2l.dprob2 = (1 - phi0) / (prob^2 * (1 - prob))
+
+ wz[, NOS+(1:NOS)] = c(w) * ed2l.dprob2 * dprob.deta^2
+
+
+ mu.phi0 = phi0
+ tmp100 = mu.phi0 * (1.0 - mu.phi0)
+ tmp200 = if ( .lpobs0 == "logit" && is.empty.list( .epobs0 )) {
+ cbind(c(w) * tmp100)
+ } else {
+ cbind(c(w) * (dphi0.deta^2) / tmp100)
+ }
+ wz[, 1:NOS] = tmp200
+ wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
+ wz
+ }), list( .lpobs0 = lpobs0,
+ .epobs0 = epobs0 ))))
+} # End of zageometric
diff --git a/R/fittedvlm.R b/R/fittedvlm.R
index 88180f6..228ed5b 100644
--- a/R/fittedvlm.R
+++ b/R/fittedvlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,7 +13,7 @@
-fittedvlm <- function(object, matrix.arg=TRUE, ...)
+fittedvlm <- function(object, matrix.arg = TRUE, ...)
{
answer =
@@ -63,7 +63,7 @@ setMethod("fitted", "vglm",
fittedvlm(object, ...))
-predictors.vglm <- function(object, matrix=TRUE, ...)
+predictors.vglm <- function(object, matrix = TRUE, ...)
{
answer =
if (matrix)
diff --git a/R/formula.vlm.q b/R/formula.vlm.q
index 19f33be..610a68a 100644
--- a/R/formula.vlm.q
+++ b/R/formula.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -9,14 +9,15 @@
formulavlm = function(x, fnumber=1, ...) {
- if (!is.Numeric(fnumber, integ=TRUE, allow=1, posit=TRUE) ||
- fnumber > 2)
- stop("argument 'fnumber' must be 1 or 2")
+ if (!is.Numeric(fnumber, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE) ||
+ fnumber > 2)
+ stop("argument 'fnumber' must be 1 or 2")
- if (!any(slotNames(x) == "misc"))
- stop("cannot find slot 'misc'")
+ if (!any(slotNames(x) == "misc"))
+ stop("cannot find slot 'misc'")
- if (fnumber == 1) x at misc$formula else x at misc$form2
+ if (fnumber == 1) x at misc$formula else x at misc$form2
}
@@ -31,29 +32,29 @@ formulaNA.VGAM = function(x, ...) {
setMethod("formula", "vlm",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
setMethod("formula", "vglm",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
setMethod("formula", "vgam",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
setMethod("formula", "rrvglm",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
setMethod("formula", "qrrvglm",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
setMethod("formula", "grc",
function(x, ...)
- formulavlm(x=x, ...))
+ formulavlm(x = x, ...))
@@ -67,11 +68,11 @@ setMethod("formula", "grc",
variable.namesvlm <- function(object, full = FALSE, ...) {
qrslot <- object at qr
if (!length(qrslot$qr)) {
- use.this <- object at x
- if (!length(use.this))
- stop("argument 'object' has empty 'qr' and 'x' slots.")
+ use.this <- object at x
+ if (!length(use.this))
+ stop("argument 'object' has empty 'qr' and 'x' slots.")
} else {
- use.this = qrslot$qr
+ use.this = qrslot$qr
}
if (full) dimnames(use.this)[[2]] else
if (object at rank) dimnames(use.this)[[2]][seq_len(object at rank)] else
diff --git a/R/generic.q b/R/generic.q
index f78e504..8832317 100644
--- a/R/generic.q
+++ b/R/generic.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/links.q b/R/links.q
index 2922af8..44c2b31 100644
--- a/R/links.q
+++ b/R/links.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -469,24 +469,24 @@ TypicalVGAMlinkFunction <- function(theta,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- paste("-1/", theta, sep = "")
- if (tag)
- string <- paste("Negative inverse:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / nreciprocal(theta, earg = earg, inverse = FALSE, deriv)
- } else {
- 1 / sqrt(-2*theta)
- }
+ if (is.character(theta)) {
+ string <- paste("-1/", theta, sep = "")
+ if (tag)
+ string <- paste("Negative inverse:", string)
+ return(string)
+ }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / nreciprocal(theta, earg = earg, inverse.arg = FALSE, deriv)
} else {
- switch(deriv+1,
- -1 / (2 * theta^2),
- theta^3,
- 3 * theta^5)
- }
+ 1 / sqrt(-2*theta)
+ }
+ } else {
+ switch(deriv+1,
+ -1 / (2 * theta^2),
+ theta^3,
+ 3 * theta^5)
+ }
}
@@ -575,10 +575,14 @@ fsqrt <- function(theta, earg = list(min = 0, max = 1, mux=sqrt(2)),
if (is.Numeric(earg$min)) min = earg$min
if (is.Numeric(earg$max)) max = earg$max
if (is.Numeric(earg$mux)) mux = earg$mux
- if (!is.Numeric(min,allow = 1)) stop("bad input for 'min' component")
- if (!is.Numeric(max,allow = 1)) stop("bad input for 'max' component")
- if (!is.Numeric(mux,allow = 1,posit = TRUE)) stop("bad input for 'mux' component")
- if (min >= max) stop("'min' >= 'max' is not allowed")
+ if (!is.Numeric(min, allowable.length = 1))
+ stop("bad input for 'min' component")
+ if (!is.Numeric(max, allowable.length = 1))
+ stop("bad input for 'max' component")
+ if (!is.Numeric(mux, allowable.length = 1, positive = TRUE))
+ stop("bad input for 'mux' component")
+ if (min >= max)
+ stop("'min' >= 'max' is not allowed")
if (is.character(theta)) {
string <- if (short)
@@ -888,12 +892,13 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
cutpoint = earg$cutpoint # Optional; if so then is a NULL
} else
stop("argument 'earg' must be a list")
- if (!is.Numeric(lambda, posit = TRUE))
+ if (!is.Numeric(lambda, positive = TRUE))
stop('could not determine lambda or lambda has negative values')
if (is.Numeric(cutpoint))
- if (any(cutpoint < 0) || !is.Numeric(cutpoint, integer = TRUE))
- warning("argument 'cutpoint' should contain ",
- "non-negative integer values")
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
+ warning("argument 'cutpoint' should contain ",
+ "non-negative integer values")
if (is.character(theta)) {
string <- if (short) {
@@ -924,14 +929,16 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
thmat = cbind(theta)
lambda = rep(lambda, len=ncol(thmat)) # Allow recycling for lambda
- if (is.Numeric(cutpoint)) cutpoint = rep(cutpoint, len=ncol(thmat))
+ if (is.Numeric(cutpoint))
+ cutpoint = rep(cutpoint, len=ncol(thmat))
if (ncol(thmat) > 1) {
answer = thmat
for(ii in 1:ncol(thmat))
answer[,ii] = Recall(theta = thmat[,ii],
earg = list(lambda=lambda[ii],
- cutpoint = if (is.Numeric(cutpoint)) cutpoint[ii] else NULL),
- inverse=inverse, deriv = deriv)
+ cutpoint =
+ if (is.Numeric(cutpoint)) cutpoint[ii] else NULL),
+ inverse = inverse, deriv = deriv)
return(answer)
}
@@ -941,11 +948,11 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
1 / Recall(theta = theta, earg = earg,
inverse = FALSE, deriv = deriv)
} else {
- if (is.Numeric(cutpoint)) {
- pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
- } else {
- pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
- }
+ if (is.Numeric(cutpoint)) {
+ pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
+ } else {
+ pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
+ }
}
} else {
smallno = 1 * .Machine$double.eps
@@ -956,7 +963,8 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
switch(deriv+1, {
temp = Ql / (3*sqrt(lambda))
temp = pmin(temp, 1.0 - smallno) # 100 / .Machine$double.eps
- -3*log1p(-temp) + if (is.Numeric(cutpoint)) log(cutpoint) else 0},
+ -3*log1p(-temp) +
+ if (is.Numeric(cutpoint)) log(cutpoint) else 0},
(1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql),
{ stop('cannot handle deriv = 2') },
stop("argument 'deriv' unmatched"))
@@ -975,7 +983,7 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
if (!is.Numeric(cutpoint))
stop("could not determine the cutpoint")
if (any(cutpoint < 0) ||
- !is.Numeric(cutpoint, integer = TRUE))
+ !is.Numeric(cutpoint, integer.valued = TRUE))
warning("argument 'cutpoint' should",
" contain non-negative integer values")
@@ -1056,7 +1064,8 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
stop("could not determine 'k' or it is not positive-valued")
if (!is.Numeric(cutpoint))
stop("could not determine the cutpoint")
- if (any(cutpoint < 0) || !is.Numeric(cutpoint, integer = TRUE))
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
warning("argument 'cutpoint' should",
" contain non-negative integer values")
@@ -1153,7 +1162,8 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
stop("could not determine argument 'k' or it is not positive-valued")
if (!is.Numeric(cutpoint))
stop("could not determine the cutpoint")
- if (any(cutpoint < 0) || !is.Numeric(cutpoint, integer = TRUE))
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
warning("argument 'cutpoint' should",
" contain non-negative integer values")
@@ -1288,7 +1298,7 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
checkCut = function(y) {
- if (!is.Numeric(y, posi = TRUE, integ = TRUE))
+ if (!is.Numeric(y, positive = TRUE, integer.valued = TRUE))
stop("argument 'y' must contain positive integers only")
uy = unique(y)
L = max(uy)
@@ -1305,3 +1315,74 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
+
+
+
+
+
+ nbcanlink <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+{
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("nbcanlink(", theta, ")", sep = "") else
+ paste("log(", theta, " / (", theta, " + size))", sep = "")
+ if (tag)
+ string <- paste("Nbcanlink:", string)
+ return(string)
+ }
+
+
+ if (!length(earg))
+ stop("argument 'earg' should have the eta matrix")
+ kmatrix = earg$size
+ if (!length(kmatrix))
+ stop("argument 'earg' should have a 'size' component")
+ theta = cbind(theta)
+ kmatrix = cbind(kmatrix)
+ if (ncol(kmatrix) != ncol(theta))
+ stop("arguments 'theta' and 'earg$size' do not have ",
+ "an equal number of cols")
+ if (nrow(kmatrix) != nrow(theta))
+ stop("arguments 'theta' and 'earg$size' do not have ",
+ "an equal number of rows")
+
+
+ if (deriv > 0) {
+ wrt.eta = earg$wrt.eta
+ if (!length(wrt.eta))
+ stop("argument 'earg' should have a 'wrt.eta' component")
+ if (!(wrt.eta %in% 1:2))
+ stop("argument 'earg' should be 1 or 2")
+ }
+
+
+ if (!inverse && is.list(earg) && length(earg$bval))
+ theta[theta <= 0.0] <- earg$bval
+
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta, earg = earg,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ ans = (kmatrix / expm1(-theta))
+ if (is.matrix(ans)) dimnames(ans) = NULL else names(ans) = NULL
+ ans
+ }
+ } else {
+ ans =
+ switch(deriv+1,
+ (log(theta / (theta + kmatrix))),
+ if (wrt.eta == 1) theta * (theta + kmatrix) / kmatrix else
+ -(theta + kmatrix),
+ if (wrt.eta == 1)
+ -(theta * (theta + kmatrix))^2 / ((2 * theta + kmatrix) * kmatrix) else
+ (theta + kmatrix)^2)
+ if (is.matrix(ans)) dimnames(ans) = NULL else names(ans) = NULL
+ ans
+ }
+}
+
+
+
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index 3a71b27..aee904e 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,7 +13,11 @@ logLik.vlm <- function(object, ...)
if (!isGeneric("logLik"))
- setGeneric("logLik", function(object, ...) standardGeneric("logLik"))
+ setGeneric("logLik", function(object, ...)
+ standardGeneric("logLik"),
+ package = "VGAM")
+
+
setMethod("logLik", "vlm", function(object, ...)
logLik.vlm(object, ...))
diff --git a/R/lrwaldtest.R b/R/lrwaldtest.R
new file mode 100644
index 0000000..2ec1b39
--- /dev/null
+++ b/R/lrwaldtest.R
@@ -0,0 +1,597 @@
+# These functions are
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+update_default <- function (object, formula., ..., evaluate = TRUE) {
+ if (is.null(call <- getCall(object)))
+ stop("need an object with call component")
+
+ extras <- match.call(expand.dots = FALSE)$...
+
+ if (!missing(formula.)) {
+ call$formula <- update_formula(formula(object), formula.)
+ }
+
+ if (length(extras)) {
+ existing <- !is.na(match(names(extras), names(call)))
+ for (a in names(extras)[existing])
+ call[[a]] <- extras[[a]]
+ if (any(!existing)) {
+ call <- c(as.list(call), extras[!existing])
+ call <- as.call(call)
+ }
+ }
+
+ if (evaluate) eval(call, parent.frame()) else call
+}
+
+
+
+
+
+update_formula <- function (old, new, ...) {
+
+
+ tmp <- (update.formula(as.formula(old), as.formula(new)))
+
+
+
+
+ out <- formula(terms.formula(tmp, simplify = TRUE))
+ return(out)
+}
+
+
+
+
+
+
+
+if (FALSE)
+print_anova <- function (x, digits = max(getOption("digits") - 2, 3),
+ signif.stars = getOption("show.signif.stars"),
+ ...) {
+
+
+ x <- x at Body
+
+ if (!is.null(heading <- attr(x, "heading")))
+ cat(heading, sep = "\n")
+ nc <- dim(x)[2L]
+ if (is.null(cn <- colnames(x)))
+ stop("'anova' object must have colnames")
+ has.P <- grepl("^(P|Pr)\\(", cn[nc])
+ zap.i <- 1L:(if (has.P) nc - 1 else nc)
+ i <- which(substr(cn, 2, 7) == " value")
+ i <- c(i, which(!is.na(match(cn, c("F", "Cp", "Chisq")))))
+ if (length(i))
+ zap.i <- zap.i[!(zap.i %in% i)]
+ tst.i <- i
+ if (length(i <- grep("Df$", cn)))
+ zap.i <- zap.i[!(zap.i %in% i)]
+ stats::printCoefmat(x, digits = digits, signif.stars = signif.stars,
+ has.Pvalue = has.P, P.values = has.P, cs.ind = NULL,
+ zap.ind = zap.i, tst.ind = tst.i, na.print = "", ...)
+ invisible(x)
+}
+
+
+
+ setGeneric("lrtest", function(object, ...) standardGeneric("lrtest"),
+ package = "VGAM")
+
+
+
+
+
+
+setClass("VGAManova", representation(
+ "Body" = "data.frame"))
+
+
+
+lrtest_vglm <- function(object, ..., name = NULL) {
+
+
+
+
+
+
+
+
+ cls <- class(object)[1]
+
+ nobs <- function(x) x at misc$nrow_X_vlm
+
+
+ tlab <- function(x) attr(terms(x), "term.labels")
+
+
+ if (is.null(name))
+ name <- function(x) paste(deparse(formula(x)), collapse = "\n")
+
+
+
+ modelUpdate <- function(fm, update) {
+
+ if (is.numeric(update)) {
+ if (any(update < 1)) {
+ warning("for numeric model specifications all values ",
+ "have to be >=1")
+ update <- abs(update)[abs(update) > 0]
+ }
+ if (any(update > length(tlab(fm)))) {
+ warning(paste("more terms specified than existent in the model:",
+ paste(as.character(update[update > length(tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update <= length(tlab(fm))]
+ }
+ update <- tlab(fm)[update]
+ }
+
+ if (is.character(update)) {
+ if (!all(update %in% tlab(fm))) {
+ warning(paste("terms specified that are not in the model:",
+ paste(dQuote(update[!(update %in% tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update %in% tlab(fm)]
+ }
+ if (length(update) < 1) stop("empty model specification")
+ update <- as.formula(paste(". ~ . -",
+ paste(update, collapse = " - ")))
+ }
+
+ if (inherits(update, "formula")) {
+ update <- update_default(fm, update)
+ }
+ if (!inherits(update, cls)) {
+ warning(paste("original model was of class \"", cls,
+ "\", updated model is of class \"",
+ class(update)[1], "\"", sep = ""))
+ }
+ return(update)
+ }
+
+
+ objects <- list(object, ...)
+ nmodels <- length(objects)
+ if (nmodels < 2) {
+ objects <- c(objects, . ~ 1)
+ nmodels <- 2
+ }
+
+ no.update <- sapply(objects, function(obj) inherits(obj, cls))
+
+ for(i in 2:nmodels) {
+ objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
+ }
+
+
+ ns <- sapply(objects, nobs)
+
+
+ if (any(ns != ns[1])) {
+ for(i in 2:nmodels) {
+ if (ns[1] != ns[i]) {
+ if (no.update[i])
+ stop("models were not all fitted to ",
+ "the same size of dataset") else {
+ commonobs <- row.names(model.frame(objects[[i]])) %in%
+ row.names(model.frame(objects[[i-1]]))
+ objects[[i]] <- eval(substitute(update(objects[[i]],
+ subset = commonobs),
+ list(commonobs = commonobs)))
+ if (nobs(objects[[i]]) != ns[1])
+ stop("models could not be fitted to the same size of dataset")
+ }
+ }
+ }
+ }
+
+ rval <- matrix(rep(as.numeric(NA), 5 * nmodels), ncol = 5)
+ colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
+ rownames(rval) <- 1:nmodels
+
+ logLlist <- lapply(objects, logLik)
+
+ dflist <- lapply(objects, df.residual)
+
+ rval[,1] <- unlist(dflist)
+
+ rval[,2] <- unlist(logLlist)
+
+ rval[2:nmodels, 3] <- rval[2:nmodels, 1] - rval[1:(nmodels-1), 1]
+
+ rval[2:nmodels, 4] <- 2 * abs(rval[2:nmodels, 2] - rval[1:(nmodels-1), 2])
+
+ rval[,5] <- pchisq(rval[,4], round(abs(rval[,3])), lower.tail = FALSE)
+
+ variables <- lapply(objects, name)
+ title <- "Likelihood ratio test\n"
+ topnote <- paste("Model ", format(1:nmodels),
+ ": ", variables, sep = "", collapse = "\n")
+
+
+
+
+
+ new("VGAManova", Body =
+ structure(as.data.frame(rval), heading = c(title, topnote)))
+}
+
+
+
+
+setMethod("lrtest", "vglm",
+ function(object, ...)
+ lrtest_vglm(object = object, ...))
+
+
+
+
+
+
+
+
+
+ setMethod("show", "VGAManova",
+ function(object)
+ stats::print.anova(object at Body))
+
+
+
+
+
+
+
+
+use.S3.lrtest = TRUE
+use.S3.lrtest = FALSE
+
+
+if (use.S3.lrtest)
+lrtest <- function(object, ...) {
+ UseMethod("lrtest")
+}
+
+
+
+if (use.S3.lrtest)
+lrtest.formula <- function(object, ..., data = list()) {
+ object <- if (length(data) < 1)
+ eval(call("lm", formula = as.formula(deparse(substitute(object))),
+ environment(object))) else
+ eval(call("lm", formula = as.formula(deparse(substitute(object))),
+ data = as.name(deparse(substitute(data))),
+ environment(data)))
+ lrtest.default(object, ...)
+}
+
+
+
+if (use.S3.lrtest)
+lrtest.default <- function(object, ..., name = NULL) {
+
+
+
+
+print("hi S3 20111224")
+
+
+ cls <- class(object)[1]
+
+ nobs <- function(x) NROW(residuals(x))
+ tlab <- function(x) attr(terms(x), "term.labels")
+ if (is.null(name))
+ name <- function(x) paste(deparse(formula(x)), collapse = "\n")
+
+ modelUpdate <- function(fm, update) {
+ if (is.numeric(update)) {
+ if (any(update < 1)) {
+ warning("for numeric model specifications all values ",
+ "have to be >=1")
+ update <- abs(update)[abs(update) > 0]
+ }
+ if (any(update > length(tlab(fm)))) {
+ warning(paste("more terms specified than existent in the model:",
+ paste(as.character(update[update > length(tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update <= length(tlab(fm))]
+ }
+ update <- tlab(fm)[update]
+ }
+ if (is.character(update)) {
+ if (!all(update %in% tlab(fm))) {
+ warning(paste("terms specified that are not in the model:",
+ paste(dQuote(update[!(update %in% tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update %in% tlab(fm)]
+ }
+ if (length(update) < 1) stop("empty model specification")
+ update <- as.formula(paste(". ~ . -",
+ paste(update, collapse = " - ")))
+ }
+ if (inherits(update, "formula")) update <- update(fm, update)
+ if (!inherits(update, cls))
+ warning(paste("original model was of class \"", cls,
+ "\", updated model is of class \"",
+ class(update)[1], "\"", sep = ""))
+ return(update)
+ }
+
+
+ objects <- list(object, ...)
+ nmodels <- length(objects)
+ if (nmodels < 2) {
+ objects <- c(objects, . ~ 1)
+print("objects 1")
+print( objects )
+ nmodels <- 2
+ }
+
+ no.update <- sapply(objects, function(obj) inherits(obj, cls))
+print("no.update")
+print( no.update )
+
+ for(i in 2:nmodels)
+ objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
+
+print("objects i")
+print( objects )
+
+ ns <- sapply(objects, nobs)
+ if (any(ns != ns[1])) {
+ for(i in 2:nmodels) {
+ if (ns[1] != ns[i]) {
+ if (no.update[i])
+ stop("models were not all fitted to ",
+ "the same size of dataset") else {
+ commonobs <- row.names(model.frame(objects[[i]])) %in%
+ row.names(model.frame(objects[[i-1]]))
+print("commonobs")
+print( commonobs )
+ objects[[i]] <- eval(substitute(update(objects[[i]],
+ subset = commonobs),
+ list(commonobs = commonobs)))
+ if (nobs(objects[[i]]) != ns[1])
+ stop("models could not be fitted to the same size of dataset")
+ }
+ }
+ }
+ }
+
+ rval <- matrix(rep(as.numeric(NA), 5 * nmodels), ncol = 5)
+ colnames(rval) <- c("#Df", "LogLik", "Df", "Chisq", "Pr(>Chisq)")
+ rownames(rval) <- 1:nmodels
+
+ logL <- lapply(objects, logLik)
+ rval[,1] <- as.numeric(sapply(logL, function(x) attr(x, "df")))
+ rval[,2] <- sapply(logL, as.numeric)
+ rval[2:nmodels, 3] <- rval[2:nmodels, 1] - rval[1:(nmodels-1), 1]
+ rval[2:nmodels, 4] <- 2 * abs(rval[2:nmodels, 2] - rval[1:(nmodels-1), 2])
+ rval[,5] <- pchisq(rval[,4], round(abs(rval[,3])), lower.tail = FALSE)
+
+ variables <- lapply(objects, name)
+ title <- "Likelihood ratio test\n"
+ topnote <- paste("Model ", format(1:nmodels),
+ ": ", variables, sep = "", collapse = "\n")
+
+ structure(as.data.frame(rval), heading = c(title, topnote),
+ class = c("anova", "data.frame"))
+} # End of lrtest.default
+
+
+
+
+
+
+
+
+if (FALSE)
+ setGeneric("waldtest", function(object, ...) standardGeneric("waldtest"),
+ package = "VGAM")
+
+
+if (FALSE)
+waldtest <- function(object, ...) {
+ UseMethod("waldtest")
+}
+
+
+
+waldtest_formula <- function(object, ..., data = list()) {
+ object <- if (length(data) < 1)
+ eval(call("lm", formula = as.formula(deparse(substitute(object))),
+ environment(object))) else
+ eval(call("lm", formula = as.formula(deparse(substitute(object))),
+ data = as.name(deparse(substitute(data))), environment(data)))
+ waldtest_lm(object, ...)
+}
+
+
+
+
+waldtest_default <- function(object, ..., vcov = NULL,
+ test = c("Chisq", "F"), name = NULL) {
+
+
+ vcov. <- vcov
+ cls <- class(object)[1]
+
+
+ nobs <- function(x) NROW(residuals(x))
+
+ tlab <- function(x) attr(terms(x), "term.labels")
+
+ if (is.null(name))
+ name <- function(x) paste(deparse(formula(x)), collapse = "\n")
+
+
+
+ modelUpdate <- function(fm, update) {
+ if (is.numeric(update)) {
+ if (any(update < 1)) {
+ warning("for numeric model specifications all values ",
+ "have to be >=1")
+ update <- abs(update)[abs(update) > 0]
+ }
+ if (any(update > length(tlab(fm)))) {
+ warning(paste("more terms specified than existent in the model:",
+ paste(as.character(update[update > length(tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update <= length(tlab(fm))]
+ }
+ update <- tlab(fm)[update]
+ }
+ if (is.character(update)) {
+ if (!all(update %in% tlab(fm))) {
+ warning(paste("terms specified that are not in the model:",
+ paste(dQuote(update[!(update %in% tlab(fm))]),
+ collapse = ", ")))
+ update <- update[update %in% tlab(fm)]
+ }
+ if (length(update) < 1) stop("empty model specification")
+ update <- as.formula(paste(". ~ . -",
+ paste(update, collapse = " - ")))
+ }
+ if (inherits(update, "formula")) update <- update(fm, update)
+ if (!inherits(update, cls))
+ stop(paste("original model was of class \"", cls,
+ "\", updated model is of class \"",
+ class(update)[1], "\"", sep = ""))
+ return(update)
+ }
+
+
+ modelCompare <- function(fm, fm.up, vfun = NULL) {
+ q <- length(coef(fm)) - length(coef(fm.up))
+
+ if (q > 0) {
+ fm0 <- fm.up
+ fm1 <- fm
+ } else {
+ fm0 <- fm
+ fm1 <- fm.up
+ }
+ k <- length(coef(fm1))
+ n <- nobs(fm1)
+
+ if (!all(tlab(fm0) %in% tlab(fm1)))
+ stop("models are not nested")
+ ovar <- which(!(names(coef(fm1)) %in% names(coef(fm0))))
+
+ vc <- if (is.null(vfun)) vcov(fm1) else if (is.function(vfun))
+ vfun(fm1) else vfun
+
+ stat <- t(coef(fm1)[ovar]) %*% solve(vc[ovar,ovar]) %*% coef(fm1)[ovar]
+ return(c(-q, stat))
+ }
+
+
+ objects <- list(object, ...)
+ nmodels <- length(objects)
+ if (nmodels < 2) {
+ objects <- c(objects, . ~ 1)
+ nmodels <- 2
+ }
+
+ no.update <- sapply(objects, function(obj) inherits(obj, cls))
+
+ for(i in 2:nmodels)
+ objects[[i]] <- modelUpdate(objects[[i-1]], objects[[i]])
+
+ responses <- as.character(lapply(objects,
+ function(x) deparse(terms(x)[[2]])))
+ sameresp <- responses == responses[1]
+ if (!all(sameresp)) {
+ objects <- objects[sameresp]
+ warning("models with response ", deparse(responses[!sameresp]),
+ " removed because response differs from ", "model 1")
+ }
+
+ ns <- sapply(objects, nobs)
+ if (any(ns != ns[1])) {
+ for(i in 2:nmodels) {
+ if (ns[1] != ns[i]) {
+ if (no.update[i])
+ stop("models were not all fitted to the ",
+ "same size of dataset") else {
+ commonobs <- row.names(model.frame(objects[[i]])) %in%
+ row.names(model.frame(objects[[i-1]]))
+ objects[[i]] <- eval(substitute(update(objects[[i]],
+ subset = commonobs),
+ list(commonobs = commonobs)))
+ if (nobs(objects[[i]]) != ns[1])
+ stop("models could not be fitted to the same size of dataset")
+ }
+ }
+ }
+ }
+
+ if (nmodels > 2 && !is.null(vcov.) && !is.function(vcov.))
+ stop("to compare more than 2 models `vcov.' needs to be a function")
+
+ test <- match.arg(test)
+ rval <- matrix(rep(as.numeric(NA), 4 * nmodels), ncol = 4)
+ colnames(rval) <- c("Res.Df", "Df", test,
+ paste("Pr(>", test, ")", sep = ""))
+ rownames(rval) <- 1:nmodels
+ rval[,1] <- as.numeric(sapply(objects, df.residual))
+ for(i in 2:nmodels)
+ rval[i, 2:3] <- modelCompare(objects[[i-1]], objects[[i]],
+ vfun = vcov.)
+ if (test == "Chisq") {
+ rval[,4] <- pchisq(rval[,3], round(abs(rval[,2])), lower.tail = FALSE)
+ } else {
+ df <- rval[,1]
+ for(i in 2:nmodels) if (rval[i,2] < 0) df[i] <- rval[i-1,1]
+ rval[,3] <- rval[,3]/abs(rval[,2])
+ rval[,4] <- pf(rval[,3], abs(rval[,2]), df, lower.tail = FALSE)
+ }
+
+
+ variables <- lapply(objects, name)
+ title <- "Wald test\n"
+ topnote <- paste("Model ", format(1:nmodels),
+ ": ", variables, sep = "", collapse = "\n")
+
+
+
+
+ new("VGAManova", Body =
+ structure(as.data.frame(rval), heading = c(title, topnote)))
+}
+
+
+
+
+
+
+
+if (FALSE)
+setMethod("waldtest", "vglm",
+ function(object, ...)
+ waldtest_vglm(object = object, ...))
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 49168c3..4554b36 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -299,6 +299,7 @@ setMethod("model.frame", "vlm", function(formula, ...)
vmodel.matrix.default = function(object, data = environment(object),
contrasts.arg = NULL, xlev = NULL, ...) {
+ print("20120221; in vmodel.matrix.default")
t <- if (missing(data)) terms(object) else terms(object, data = data)
if (is.null(attr(data, "terms")))
data <- model.frame(object, data, xlev = xlev) else {
@@ -342,7 +343,13 @@ setMethod("model.frame", "vlm", function(formula, ...)
isF <- FALSE
data <- list(x = rep(0, nrow(data)))
}
- ans <- .Internal(model.matrix(t, data))
+
+
+ ans <- (model.matrix(t, data))
+
+
+
+
cons <- if (any(isF))
lapply(data[isF], function(x) attr(x, "contrasts")) else NULL
attr(ans, "contrasts") <- cons
diff --git a/R/mux.q b/R/mux.q
index cd77b65..4bfcadf 100644
--- a/R/mux.q
+++ b/R/mux.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/nobs.R b/R/nobs.R
index 6926846..d243d60 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -1,4 +1,4 @@
-# These functions are Copyright (C) 1998-2011 T. W. Yee All rights reserved.
+# These functions are Copyright (C) 1998-2012 T. W. Yee All rights reserved.
# nobs.R
@@ -29,11 +29,31 @@ nobs.vlm <- function(object, type = c("lm", "vlm"), ...) {
}
+
+# 20120216; if I have the if() commented out then
+# Error in loadNamespace(package, c(which.lib.loc, lib.loc)) :
+# cyclic namespace dependency detected when loading ‘VGAM’, already loading ‘VGAM’
+if (!isGeneric("nobs"))
+ setGeneric("nobs", function(object, ...)
+ standardGeneric("nobs"),
+ package = "VGAM")
+
+
setMethod("nobs", "vlm",
function(object, ...)
nobs.vlm(object, ...))
+# setMethod("nobs", "vglm",
+# function(object, ...)
+# nobs.vlm(object, ...))
+
+
+# setMethod("nobs", "vgam",
+# function(object, ...)
+# nobs.vlm(object, ...))
+
+
@@ -162,9 +182,9 @@ nvar.rcam <- function(object, type = c("rcam", "zz"), ...) {
if (!isGeneric("nvar"))
-setGeneric("nvar", function(object, ...)
- standardGeneric("nvar"),
- package = "VGAM")
+ setGeneric("nvar", function(object, ...)
+ standardGeneric("nvar"),
+ package = "VGAM")
setMethod("nvar", "vlm",
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index 8a0ccd8..ae90274 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,7 +28,8 @@ plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRU
na.act = x at na.action
x at na.action = list() # Don't want NAs returned from predict() or resid()
- if (!is.Numeric(varxij, integ = TRUE, allow=1, posit = TRUE))
+ if (!is.Numeric(varxij, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
stop("bad input for the 'varxij' argument")
if (any(slotNames(x) == "control")) {
x at control$varxij = varxij
@@ -43,7 +44,9 @@ plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRU
c("deviance","working","pearson","response"))[1]
- if (!is.Numeric(deriv.arg, integ = TRUE, allow=1) || deriv.arg<0)
+ if (!is.Numeric(deriv.arg, integer.valued = TRUE,
+ allowable.length = 1) ||
+ deriv.arg < 0)
stop("bad input for the 'deriv' argument")
if (se && deriv.arg > 0) {
@@ -54,9 +57,10 @@ plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRU
preplot.object <- x at preplot
if (!length(preplot.object)) {
- preplot.object <- preplotvgam(x, newdata=newdata,
- raw=raw, deriv = deriv.arg, se=se,
- varxij=varxij)
+ preplot.object <- preplotvgam(x, newdata = newdata,
+ raw = raw,
+ deriv.arg = deriv.arg, se = se,
+ varxij = varxij)
}
x at preplot = preplot.object
@@ -85,12 +89,13 @@ plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRU
x at post$plotvgam.control = control # Add it to the object
if (plot.arg)
- plotpreplotvgam(preplot.object, residuals=residuals,
- rugplot=rugplot, scale=scale, se=se,
- offset.arg = offset.arg, deriv.arg = deriv.arg,
- overlay=overlay,
- which.term=which.term, which.cf=which.cf,
- control=control)
+ plotpreplotvgam(preplot.object, residuals = residuals,
+ rugplot = rugplot, scale = scale, se = se,
+ offset.arg = offset.arg,
+ deriv.arg = deriv.arg,
+ overlay = overlay,
+ which.term = which.term, which.cf = which.cf,
+ control = control)
x at na.action = na.act # Restore it's original value
invisible(x)
@@ -123,9 +128,9 @@ getallresponses = function(xij) {
headpreplotvgam = function(object, newdata = NULL,
- terms = attr((object at terms)$terms, "term.labels"),
- raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
- varxij = 1) {
+ terms = attr((object at terms)$terms, "term.labels"),
+ raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
+ varxij = 1) {
Terms <- terms(object) # 11/8/03; object at terms$terms
aa <- attributes(Terms)
all.terms <- labels(Terms)
@@ -214,13 +219,13 @@ headpreplotvgam = function(object, newdata = NULL,
preplotvgam = function(object, newdata = NULL,
- terms=attr((object at terms)$terms, "term.labels"),
- raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
- varxij=1) {
+ terms = attr((object at terms)$terms, "term.labels"),
+ raw = TRUE, deriv.arg = deriv.arg, se = FALSE,
+ varxij=1) {
- result1 = headpreplotvgam(object, newdata=newdata, terms=terms,
- raw=raw, deriv.arg = deriv.arg, se=se,
- varxij=varxij)
+ result1 = headpreplotvgam(object, newdata = newdata, terms = terms,
+ raw = raw, deriv.arg = deriv.arg, se = se,
+ varxij = varxij)
xvars = result1$xvars
xnames = result1$xnames
@@ -240,10 +245,10 @@ preplotvgam = function(object, newdata = NULL,
pred <- if (length(newdata)) {
predict(object, newdata, type = "terms",
- raw=raw, se.fit=se, deriv.arg = deriv.arg)
+ raw = raw, se.fit = se, deriv.arg = deriv.arg)
} else {
predict(object, type = "terms",
- raw=raw, se.fit=se, deriv.arg = deriv.arg)
+ raw = raw, se.fit = se, deriv.arg = deriv.arg)
}
fits <- if (is.atomic(pred)) NULL else pred$fit
@@ -290,10 +295,10 @@ plotvglm <- function(x, residuals = NULL, smooths= FALSE,
plotpreplotvgam <- function(x, y = NULL, residuals = NULL,
- rugplot= TRUE, se= FALSE, scale = 0,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.term = NULL, which.cf = NULL,
- control = NULL)
+ rugplot= TRUE, se= FALSE, scale = 0,
+ offset.arg = 0, deriv.arg = 0, overlay = FALSE,
+ which.term = NULL, which.cf = NULL,
+ control = NULL)
{
listof <- inherits(x[[1]], "preplotvgam")
if (listof) {
@@ -306,19 +311,19 @@ plotpreplotvgam <- function(x, y = NULL, residuals = NULL,
if ((is.character(which.term) && any(which.term == ii)) ||
(is.numeric(which.term) && any(which.term == plot.no)))
plotpreplotvgam(x[[ii]], y = NULL,
- residuals, rugplot=rugplot, se=se, scale=scale,
- offset.arg = offset.arg,
- deriv.arg = deriv.arg, overlay=overlay,
- which.cf=which.cf,
- control=control)
+ residuals, rugplot = rugplot, se = se, scale = scale,
+ offset.arg = offset.arg,
+ deriv.arg = deriv.arg, overlay = overlay,
+ which.cf = which.cf,
+ control = control)
}
} else {
dummy <- function(residuals = NULL, rugplot= TRUE, se= FALSE, scale = 0,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.cf = NULL, control=plotvgam.control())
- c(list(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
- offset.arg = offset.arg, deriv.arg = deriv.arg, overlay=overlay,
- which.cf=which.cf), control)
+ offset.arg = 0, deriv.arg = 0, overlay= FALSE,
+ which.cf = NULL, control=plotvgam.control())
+ c(list(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
+ offset.arg = offset.arg, deriv.arg = deriv.arg, overlay=overlay,
+ which.cf=which.cf), control)
d <- dummy(residuals=residuals, rugplot=rugplot, se=se, scale=scale,
offset.arg = offset.arg, deriv.arg = deriv.arg,
@@ -432,25 +437,25 @@ vplot.list <- function(x, y, se.y = NULL, xlab, ylab,
vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
- residuals = NULL, rugplot= FALSE, se= FALSE, scale = 0,
- offset.arg = 0, deriv.arg = 0, overlay= FALSE,
- which.cf = NULL,
- xlim = NULL, ylim = NULL,
- llty = par()$lty,
- slty = "dashed",
- pcex = par()$cex,
- pch = par()$pch,
- pcol = par()$col,
- lcol = par()$col,
- rcol = par()$col,
- scol = par()$col,
- llwd = par()$lwd,
- slwd = par()$lwd,
- add.arg= FALSE,
- one.at.a.time= FALSE,
- noxmean = FALSE,
- separator = ":",
- ...)
+ residuals = NULL, rugplot= FALSE, se= FALSE, scale = 0,
+ offset.arg = 0, deriv.arg = 0, overlay= FALSE,
+ which.cf = NULL,
+ xlim = NULL, ylim = NULL,
+ llty = par()$lty,
+ slty = "dashed",
+ pcex = par()$cex,
+ pch = par()$pch,
+ pcol = par()$col,
+ lcol = par()$col,
+ rcol = par()$col,
+ scol = par()$col,
+ llwd = par()$lwd,
+ slwd = par()$lwd,
+ add.arg= FALSE,
+ one.at.a.time= FALSE,
+ noxmean = FALSE,
+ separator = ":",
+ ...)
{
@@ -568,7 +573,7 @@ vplot.numeric <- function(x, y, se.y = NULL, xlab, ylab,
if (!length(which.cf) ||
(length(which.cf) && any(which.cf == ii))) {
- if (is.Numeric(ylim0, allow=2)) {
+ if (is.Numeric(ylim0, allowable.length = 2)) {
ylim = ylim0
} else {
ylim <- range(ylim0, uy[,ii], na.rm= TRUE)
@@ -620,7 +625,9 @@ vplot.matrix <- function(x, y, se.y = NULL, xlab, ylab,
add.hookey <- function(ch, deriv.arg = 0) {
- if (!is.Numeric(deriv.arg, integ = TRUE, allow=1) || deriv.arg<0)
+ if (!is.Numeric(deriv.arg, integer.valued = TRUE,
+ allowable.length = 1) ||
+ deriv.arg < 0)
stop("bad input for the 'deriv' argument")
if (deriv.arg == 0)
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 982a091..995b7e2 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -258,7 +258,7 @@ predict.vgam <- function(object, newdata=NULL,
v = attr(if(se.fit) predictor$fitted.values else
predictor, "vterm.assign")
is.lin <- is.linear.term(names(v))
- coefmat <- coefvlm(object, matrix = TRUE)
+ coefmat <- coefvlm(object, matrix.out = TRUE)
ord <- 0
for(ii in names(v)) {
ord <- ord + 1
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 617ff79..0e11ca2 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index afb53fc..608f194 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -166,21 +166,21 @@ predict.vlm = function(object,
fit.summary = summaryvlm(object, dispersion=dispersion)
sigma = if (is.numeric(fit.summary at sigma)) fit.summary at sigma else
sqrt(deviance(object) / object at df.residual) # was @rss
- pred = Build.terms.vlm(x=X_vlm, coefs=coefs,
- cov=sigma^2 * fit.summary at cov.unscaled,
- assign=vasgn,
- collapse=type!="terms", M=M,
+ pred = Build.terms.vlm(x = X_vlm, coefs = coefs,
+ cov = sigma^2 * fit.summary at cov.unscaled,
+ assign = vasgn,
+ collapse = type!="terms", M=M,
dimname=list(dx1, dname2),
- coefmat=coefvlm(object, matrix=TRUE))
+ coefmat = coefvlm(object, matrix.out = TRUE))
pred$df = object at df.residual
pred$sigma = sigma
} else {
- pred = Build.terms.vlm(x=X_vlm, coefs=coefs,
- cov=NULL,
- assign=vasgn,
- collapse=type!="terms", M=M,
- dimname=list(dx1, dname2),
- coefmat=coefvlm(object, matrix=TRUE))
+ pred = Build.terms.vlm(x = X_vlm, coefs = coefs,
+ cov = NULL,
+ assign = vasgn,
+ collapse = type!="terms", M=M,
+ dimname = list(dx1, dname2),
+ coefmat = coefvlm(object, matrix.out = TRUE))
}
constant = attr(pred, "constant")
diff --git a/R/print.vglm.q b/R/print.vglm.q
index 5eb7d08..7c653f5 100644
--- a/R/print.vglm.q
+++ b/R/print.vglm.q
@@ -1,96 +1,225 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
+
+
+
+show.vglm <- function(object) {
+ if (!is.null(cl <- object at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- object at coefficients
+ if (any(nas <- is.na(coef))) {
+ if (is.null(names(coef)))
+ names(coef) <- paste("b", 1:length(coef), sep = "")
+ cat("\nCoefficients: (", sum(nas),
+ " not defined because of singularities)\n", sep = "")
+ } else {
+ cat("\nCoefficients:\n")
+ }
+ print(coef)
+
+ rank <- object at rank
+ if (!length(rank))
+ rank <- sum(!nas)
+ nobs <- if (length(object at df.total)) object at df.total else
+ length(object at residuals)
+ rdf <- object at df.residual
+ if (!length(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
+
+ if (length(deviance(object)))
+ cat("Residual deviance:", format(deviance(object)), "\n")
+ llx = logLik.vlm(object = object)
+
+ if (length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
+
+ if (length(object at criterion)) {
+ ncrit <- names(object at criterion)
+ for(ii in ncrit)
+ if (ii != "loglikelihood" &&
+ ii != "deviance")
+ cat(paste(ii, ":", sep = ""),
+ format(object at criterion[[ii]]), "\n")
+ }
+
+ invisible(object)
+}
+
+
+
+
+
+
+
+
+
+
+show.vgam <- function(object) {
+
+ digits = 2
+
+
+ if (!is.null(cl <- object at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- object at coefficients
+ nas <- is.na(coef)
+
+ rank <- object at rank
+ if (is.null(rank))
+ rank <- sum(!nas)
+ nobs <- if (length(object at df.total)) object at df.total else
+ length(object at residuals)
+ rdf <- object at df.residual
+ if (is.null(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;",
+ format(round(rdf, digits = digits)), "Residual\n")
+
+ if (length(deviance(object)))
+ cat("Residual deviance:", format(deviance(object)), "\n")
+
+ llx = logLik.vlm(object = object)
+
+ if (length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
+
+ criterion <- attr(terms(object), "criterion")
+ if (!is.null(criterion) &&
+ criterion != "coefficients")
+ cat(paste(criterion, ":", sep = ""),
+ format(object[[criterion]]), "\n")
+
+ invisible(object)
+}
+
+
+
+
+setMethod("show", "vlm", function(object) show.vlm (object))
+setMethod("show", "vglm", function(object) show.vglm(object))
+setMethod("show", "vgam", function(object) show.vgam(object))
+
+
+
+
+
+
+
+
+ if (FALSE)
print.vglm <- function(x, ...) {
- if (!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
-
- coef <- x at coefficients
- if (any(nas <- is.na(coef))) {
- if (is.null(names(coef)))
- names(coef) <- paste("b", 1:length(coef), sep = "")
- cat("\nCoefficients: (", sum(nas),
- " not defined because of singularities)\n", sep = "")
- } else
- cat("\nCoefficients:\n")
- print.default(coef, ...)
-
- rank <- x at rank
- if (!length(rank))
- rank <- sum(!nas)
- nobs <- if (length(x at df.total)) x at df.total else length(x at residuals)
- rdf <- x at df.residual
- if (!length(rdf))
- rdf <- nobs - rank
- cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
-
- if (length(deviance(x)))
- cat("Residual Deviance:", format(deviance(x)), "\n")
- llx = logLik.vlm(object = x)
-
- if (length(llx))
- cat("Log-likelihood:", format(llx), "\n")
-
- if (length(x at criterion)) {
- ncrit <- names(x at criterion)
- for(i in ncrit)
- if (i!="loglikelihood" && i!="deviance")
- cat(paste(i, ":", sep=""), format(x at criterion[[i]]), "\n")
- }
-
- invisible(x)
+ if (!is.null(cl <- x at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x at coefficients
+ if (any(nas <- is.na(coef))) {
+ if (is.null(names(coef)))
+ names(coef) <- paste("b", 1:length(coef), sep = "")
+ cat("\nCoefficients: (", sum(nas),
+ " not defined because of singularities)\n", sep = "")
+ } else {
+ cat("\nCoefficients:\n")
+ }
+ print.default(coef, ...)
+
+ rank <- x at rank
+ if (!length(rank))
+ rank <- sum(!nas)
+ nobs <- if (length(x at df.total)) x at df.total else
+ length(x at residuals)
+ rdf <- x at df.residual
+ if (!length(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
+
+ if (length(deviance(x)))
+ cat("Residual deviance:", format(deviance(x)), "\n")
+ llx = logLik.vlm(object = x)
+
+ if (length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
+
+ if (length(x at criterion)) {
+ ncrit <- names(x at criterion)
+ for(ii in ncrit)
+ if (ii != "loglikelihood" && ii != "deviance")
+ cat(paste(ii, ":", sep = ""),
+ format(x at criterion[[ii]]), "\n")
+ }
+
+ invisible(x)
}
-print.vgam <- function(x, digits=2, ...) {
- if (!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
+ if (FALSE)
+print.vgam <- function(x, digits = 2, ...) {
+
+ if (!is.null(cl <- x at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
- coef <- x at coefficients
- nas <- is.na(coef)
+ coef <- x at coefficients
+ nas <- is.na(coef)
- rank <- x at rank
- if (is.null(rank))
- rank <- sum(!nas)
- nobs <- if (length(x at df.total)) x at df.total else length(x at residuals)
- rdf <- x at df.residual
- if (is.null(rdf))
- rdf <- nobs - rank
- cat("\nDegrees of Freedom:", nobs, "Total;",
- format(round(rdf, dig=digits)), "Residual\n")
+ rank <- x at rank
+ if (is.null(rank))
+ rank <- sum(!nas)
+ nobs <- if (length(x at df.total)) x at df.total else
+ length(x at residuals)
+ rdf <- x at df.residual
+ if (is.null(rdf))
+ rdf <- nobs - rank
+ cat("\nDegrees of Freedom:", nobs, "Total;",
+ format(round(rdf, dig = digits)), "Residual\n")
- if (length(deviance(x)))
- cat("Residual Deviance:", format(deviance(x)), "\n")
+ if (length(deviance(x)))
+ cat("Residual deviance:", format(deviance(x)), "\n")
- llx = logLik.vlm(object = x)
+ llx = logLik.vlm(object = x)
- if (length(llx))
- cat("Log-likelihood:", format(llx), "\n")
+ if (length(llx))
+ cat("Log-likelihood:", format(llx), "\n")
- criterion <- attr(terms(x), "criterion") # 11/8/03; x at terms$terms,
- if (!is.null(criterion) && criterion!="coefficients")
- cat(paste(criterion, ":", sep=""), format(x[[criterion]]), "\n")
+ criterion <- attr(terms(x), "criterion") # 11/8/03; x at terms$terms,
+ if (!is.null(criterion) &&
+ criterion != "coefficients")
+ cat(paste(criterion, ":", sep = ""), format(x[[criterion]]), "\n")
- invisible(x)
+ invisible(x)
}
+ if (FALSE)
+{
setMethod("print", "vlm", function(x, ...) print.vlm(x, ...))
setMethod("print", "vglm", function(x, ...) print.vglm(x, ...))
setMethod("print", "vgam", function(x, ...) print.vgam(x, ...))
+
setMethod("show", "vlm", function(object) print.vlm(object))
setMethod("show", "vglm", function(object) print.vglm(object))
setMethod("show", "vgam", function(object) print.vgam(object))
+}
+
+
diff --git a/R/print.vlm.q b/R/print.vlm.q
index 2f06c69..5dce615 100644
--- a/R/print.vlm.q
+++ b/R/print.vlm.q
@@ -1,47 +1,106 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
+
+
+
+
+
+show.vlm <- function(object) {
+ if (!is.null(cl <- object at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- object at coefficients
+ cat("\nCoefficients:\n")
+ print(coef)
+
+ rank <- object at rank
+ if (is.null(rank))
+ rank <- sum(!is.na(coef))
+ n <- object at misc$n
+ M <- object at misc$M
+ nobs <- if (length(object at df.total)) object at df.total else n * M
+ rdf <- object at df.residual
+ if (is.null(rdf))
+ rdf <- (n - rank) * M
+ cat("\nDegrees of Freedom:", nobs, "Total;",
+ rdf, "Residual\n")
+
+ if (length(deviance(object)) &&
+ is.finite(deviance(object)))
+ cat("Deviance:", format(deviance(object)), "\n")
+ if (length(object at rss) &&
+ is.finite(object at rss))
+ cat("Residual Sum of Squares:", format(object at rss), "\n")
+
+ invisible(object)
+}
+
+
+
+setMethod("show", "vlm",
+ function(object)
+ show.vlm(object))
+
+
+
+
+
+
+
+if (FALSE)
+print.vlm <- function(x, ...) {
+ if (!is.null(cl <- x at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x at coefficients
+ cat("\nCoefficients:\n")
+ print(coef, ...)
+
+ rank <- x at rank
+ if (is.null(rank))
+ rank <- sum(!is.na(coef))
+ n <- x at misc$n
+ M <- x at misc$M
+ nobs <- if (length(x at df.total)) x at df.total else n * M
+ rdf <- x at df.residual
+ if (is.null(rdf))
+ rdf <- (n - rank) * M
+ cat("\nDegrees of Freedom:", nobs, "Total;",
+ rdf, "Residual\n")
+
+ if (length(deviance(x)) &&
+ is.finite(deviance(x)))
+ cat("Deviance:", format(deviance(x)), "\n")
+ if (length(x at rss) &&
+ is.finite(x at rss))
+ cat("Residual Sum of Squares:", format(x at rss), "\n")
+
+ invisible(x)
+}
+
+
+
+
if (!is.R()) {
setMethod("show", "vlm",
function(object)
print.vlm(object))
}
+
+
+if (FALSE)
setMethod("print", "vlm",
function(x, ...)
print.vlm(x, ...))
-print.vlm <- function(x, ...) {
- if (!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
-
- coef <- x at coefficients
- cat("\nCoefficients:\n")
- print(coef, ...)
-
- rank <- x at rank
- if (is.null(rank))
- rank <- sum(!is.na(coef))
- n <- x at misc$n
- M <- x at misc$M
- nobs <- if (length(x at df.total)) x at df.total else n*M
- rdf <- x at df.residual
- if (is.null(rdf))
- rdf <- (n - rank) * M
- cat("\nDegrees of Freedom:", nobs, "Total;", rdf, "Residual\n")
-
- if (length(deviance(x)) && is.finite(deviance(x)))
- cat("Deviance:", format(deviance(x)), "\n")
- if (length(x at rss) && is.finite(x at rss))
- cat("Residual Sum of Squares:", format(x at rss), "\n")
-
- invisible(x)
-}
-
diff --git a/R/qrrvglm.control.q b/R/qrrvglm.control.q
index a261d6b..6722a68 100644
--- a/R/qrrvglm.control.q
+++ b/R/qrrvglm.control.q
@@ -1,94 +1,111 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
-qrrvglm.control = function(Rank=1,
+qrrvglm.control = function(Rank = 1,
Bestof = if (length(Cinit)) 1 else 10,
- checkwz=TRUE,
+ checkwz = TRUE,
Cinit = NULL,
- Crow1positive=TRUE,
+ Crow1positive = TRUE,
epsilon = 1.0e-06,
EqualTolerances = TRUE,
Etamat.colmax = 10,
FastAlgorithm = TRUE,
- GradientFunction=TRUE,
+ GradientFunction = TRUE,
Hstep = 0.001,
- isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ isdlv = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
iKvector = 0.1,
iShape = 0.1,
ITolerances = FALSE,
maxitl = 40,
imethod = 1,
Maxit.optim = 250,
- MUXfactor = rep(7, length=Rank),
+ MUXfactor = rep(7, length = Rank),
Norrr = ~ 1,
optim.maxit = 20,
Parscale = if (ITolerances) 0.001 else 1.0,
SD.Cinit = 0.02,
SmallNo = 5.0e-13,
trace = TRUE,
- Use.Init.Poisson.QO=TRUE,
+ Use.Init.Poisson.QO = TRUE,
wzepsilon = .Machine$double.eps^0.75,
...)
{
- if (!is.Numeric(iShape, posit=TRUE)) stop("bad input for 'iShape'")
- if (!is.Numeric(iKvector, posit=TRUE)) stop("bad input for 'iKvector'")
- if (!is.Numeric(isdlv, posit=TRUE)) stop("bad input for 'isdlv'")
- if (any(isdlv < 0.2 | isdlv > 10))
+ if (!is.Numeric(iShape, positive = TRUE))
+ stop("bad input for 'iShape'")
+ if (!is.Numeric(iKvector, positive = TRUE))
+ stop("bad input for 'iKvector'")
+ if (!is.Numeric(isdlv, positive = TRUE))
+ stop("bad input for 'isdlv'")
+ if (any(isdlv < 0.2 |
+ isdlv > 10))
stop("isdlv values must lie between 0.2 and 10")
if (length(isdlv) > 1 && any(diff(isdlv) > 0))
stop("successive isdlv values must not increase")
- if (!is.Numeric(epsilon, posit=TRUE, allow=1))
+ if (!is.Numeric(epsilon, positive = TRUE,
+ allowable.length = 1))
stop("bad input for 'epsilon'")
- if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
+ Etamat.colmax < Rank)
stop("bad input for 'Etamat.colmax'")
- if (!is.Numeric(Hstep, posit=TRUE, allow=1))
+ if (!is.Numeric(Hstep, positive = TRUE,
+ allowable.length = 1))
stop("bad input for 'Hstep'")
- if (!is.Numeric(maxitl, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(maxitl, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'maxitl'")
- if (!is.Numeric(imethod, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(imethod, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'imethod'")
- if (!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(Maxit.optim, integer.valued = TRUE, positive = TRUE))
stop("Bad input for 'Maxit.optim'")
- if (!is.Numeric(MUXfactor, posit=TRUE))
+ if (!is.Numeric(MUXfactor, positive = TRUE))
stop("bad input for 'MUXfactor'")
if (any(MUXfactor < 1 | MUXfactor > 10))
stop("MUXfactor values must lie between 1 and 10")
- if (!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
+ if (!is.Numeric(optim.maxit, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("Bad input for 'optim.maxit'")
- if (!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(Rank, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'Rank'")
- if (!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
+ if (!is.Numeric(SD.Cinit, positive = TRUE,
+ allowable.length = 1))
stop("bad input for 'SD.Cinit'")
if (ITolerances && !EqualTolerances)
stop("'EqualTolerances' must be TRUE if 'ITolerances' is TRUE")
- if (!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(Bestof, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'Bestof'")
FastAlgorithm = as.logical(FastAlgorithm)[1]
if (!FastAlgorithm)
- stop("FastAlgorithm=TRUE is now required")
+ stop("FastAlgorithm = TRUE is now required")
if ((SmallNo < .Machine$double.eps) ||
- (SmallNo > .0001)) stop("SmallNo is out of range")
+ (SmallNo > .0001))
+ stop("SmallNo is out of range")
if (any(Parscale <= 0))
stop("Parscale must contain positive numbers only")
- if (!is.logical(checkwz) || length(checkwz) != 1)
+ if (!is.logical(checkwz) ||
+ length(checkwz) != 1)
stop("bad input for 'checkwz'")
- if (!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ if (!is.Numeric(wzepsilon,
+ allowable.length = 1, positive = TRUE))
stop("bad input for 'wzepsilon'")
ans = list(
Bestof = Bestof,
checkwz=checkwz,
Cinit = Cinit,
- Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ Crow1positive=as.logical(rep(Crow1positive, len = Rank)),
ConstrainedQO = TRUE, # A constant, not a control parameter
Corner = FALSE, # Needed for valt.1iter()
Dzero = NULL,
@@ -98,7 +115,7 @@ qrrvglm.control = function(Rank=1,
FastAlgorithm = FastAlgorithm,
GradientFunction = GradientFunction,
Hstep = Hstep,
- isdlv = rep(isdlv, len=Rank),
+ isdlv = rep(isdlv, len = Rank),
iKvector = as.numeric(iKvector),
iShape = as.numeric(iShape),
ITolerances = ITolerances,
@@ -106,7 +123,7 @@ qrrvglm.control = function(Rank=1,
imethod = imethod,
Maxit.optim = Maxit.optim,
min.criterion = TRUE, # needed for calibrate
- MUXfactor = rep(MUXfactor, length=Rank),
+ MUXfactor = rep(MUXfactor, length = Rank),
Norrr = Norrr,
optim.maxit = optim.maxit,
OptimizeWrtC = TRUE,
@@ -117,9 +134,9 @@ qrrvglm.control = function(Rank=1,
SD.Cinit = SD.Cinit,
SmallNo = SmallNo,
szero = NULL,
- Svd.arg = TRUE, Alpha=0.5, Uncorrelated.lv = TRUE,
+ Svd.arg = TRUE, Alpha = 0.5, Uncorrelated.lv = TRUE,
trace = trace,
- Use.Init.Poisson.QO=as.logical(Use.Init.Poisson.QO)[1],
+ Use.Init.Poisson.QO = as.logical(Use.Init.Poisson.QO)[1],
wzepsilon = wzepsilon)
ans
}
diff --git a/R/qtplot.q b/R/qtplot.q
index c426e84..51f0d43 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -28,44 +28,47 @@ qtplot.lms.bcn <- function(percentiles = c(25,50,75),
answer
}
+
qtplot.lms.bcg <- function(percentiles = c(25,50,75),
eta = NULL, yoffset = 0)
{
- cc <- percentiles
- lp = length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
- dimnames = list(dimnames(eta)[[1]],
- paste(as.character(percentiles), "%", sep = "")))
- lambda <- eta[, 1]
- sigma <- eta[, 3]
- shape <- 1 / (lambda * sigma)^2
- for(ii in 1:lp) {
- ccc <- rep(cc[ii]/100, len=nrow(eta))
- ccc <- ifelse(lambda > 0, ccc, 1-ccc)
- answer[, ii] <- eta[, 2] * (qgamma(ccc, sh=shape)/shape)^(1/lambda)
- }
- answer
+ cc <- percentiles
+ lp = length(percentiles)
+ answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ dimnames = list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep = "")))
+ lambda <- eta[, 1]
+ sigma <- eta[, 3]
+ shape <- 1 / (lambda * sigma)^2
+ for(ii in 1:lp) {
+ ccc <- rep(cc[ii]/100, len=nrow(eta))
+ ccc <- ifelse(lambda > 0, ccc, 1-ccc)
+ answer[, ii] <- eta[, 2] *
+ (qgamma(ccc, shape = shape)/shape)^(1/lambda)
+ }
+ answer
}
+
qtplot.lms.yjn2 <-
qtplot.lms.yjn <- function(percentiles = c(25,50,75),
eta = NULL, yoffset = 0)
{
- cc <- percentiles
- lp = length(percentiles)
- answer <- matrix(as.numeric(NA), nrow(eta), lp,
- dimnames = list(dimnames(eta)[[1]],
- paste(as.character(percentiles), "%", sep = "")))
- lambda <- eta[, 1]
- mu <- eta[, 2]
- sigma <- eta[, 3] # Link function already taken care of above
- for(ii in 1:lp) {
- ccc <- mu + sigma * qnorm(cc[ii]/100)
- answer[, ii] <- yeo.johnson(ccc, lambda, inverse= TRUE) - yoffset
- }
- answer
+ cc <- percentiles
+ lp = length(percentiles)
+ answer <- matrix(as.numeric(NA), nrow(eta), lp,
+ dimnames = list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep = "")))
+ lambda <- eta[, 1]
+ mu <- eta[, 2]
+ sigma <- eta[, 3] # Link function already taken care of above
+ for(ii in 1:lp) {
+ ccc <- mu + sigma * qnorm(cc[ii]/100)
+ answer[, ii] <- yeo.johnson(ccc, lambda, inverse= TRUE) - yoffset
+ }
+ answer
}
qtplot.default <- function(object, ...) {
@@ -127,8 +130,8 @@ qtplot.lmscreg <- function(object,
}
if (plot.it) {
- plotqtplot.lmscreg(fit=fitted.values, obj=object,
- newdata=newdata,
+ plotqtplot.lmscreg(fitted.values = fitted.values, object = object,
+ newdata = newdata,
lp = lp,
percentiles = percentiles, ...)
}
@@ -140,7 +143,7 @@ qtplot.lmscreg <- function(object,
plotqtplot.lmscreg <- function(fitted.values, object,
newdata = NULL,
- percentiles=object at misc$percentiles,
+ percentiles = object at misc$percentiles,
lp = NULL,
add.arg = FALSE,
y = if (length(newdata)) FALSE else TRUE,
@@ -148,7 +151,8 @@ plotqtplot.lmscreg <- function(fitted.values, object,
label = TRUE,
size.label = 0.06,
xlab = NULL, ylab = "",
- pch = par()$pch, pcex = par()$cex, pcol.arg = par()$col,
+ pch = par()$pch, pcex = par()$cex,
+ pcol.arg = par()$col,
xlim = NULL, ylim = NULL,
llty.arg = par()$lty,
lcol.arg = par()$col, llwd.arg = par()$lwd,
@@ -278,21 +282,24 @@ if (TRUE) {
qtplot.egumbel <-
qtplot.gumbel <-
- function(object, plot.it = TRUE, y.arg = TRUE, spline.fit = FALSE, label = TRUE,
- R=object at misc$R,
- percentiles=object at misc$percentiles,
+ function(object, plot.it = TRUE, y.arg = TRUE,
+ spline.fit = FALSE, label = TRUE,
+ R = object at misc$R,
+ percentiles = object at misc$percentiles,
add.arg = FALSE,
- mpv=object at misc$mpv,
+ mpv = object at misc$mpv,
xlab = NULL, ylab = "", main = "",
pch = par()$pch, pcol.arg = par()$col,
- llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+ llty.arg = par()$lty, lcol.arg = par()$col,
+ llwd.arg = par()$lwd,
tcol.arg = par()$col, tadj = 1, ...)
{
if (!is.logical(mpv) || length(mpv) != 1)
- stop("bad input for 'mpv'")
+ stop("bad input for 'mpv'")
if (!length(percentiles) ||
- (!is.Numeric(percentiles, posit = TRUE) || max(percentiles) >= 100))
- stop("bad input for 'percentiles'")
+ (!is.Numeric(percentiles, positive = TRUE) ||
+ max(percentiles) >= 100))
+ stop("bad input for 'percentiles'")
@@ -604,7 +611,7 @@ cdf.lms.bcg <- function(y, eta0)
{
shape = 1 / (eta0[, 1] * eta0[, 3])^2
Gvec = shape * (y/eta0[, 2])^(eta0[, 1])
- ans = c(pgamma(Gvec, sh=shape))
+ ans = c(pgamma(Gvec, shape = shape))
ans[eta0[, 1] < 0] = 1-ans
names(ans) = dimnames(eta0)[[1]]
ans
@@ -686,12 +693,13 @@ rlplot.gev <-
{
log.arg = log
rm(log)
- if (!is.Numeric(epsilon, allow = 1) || abs(epsilon) > 0.10)
- stop("bad input for 'epsilon'")
- if (!is.Numeric(probability, posit = TRUE) ||
+ if (!is.Numeric(epsilon, allowable.length = 1) ||
+ abs(epsilon) > 0.10)
+ stop("bad input for 'epsilon'")
+ if (!is.Numeric(probability, positive = TRUE) ||
max(probability) >= 1 ||
- length(probability) < 5)
- stop("bad input for 'probability'")
+ length(probability) < 5)
+ stop("bad input for 'probability'")
if (!is.logical(log.arg) || length(log.arg) != 1)
stop("bad input for argument 'log'")
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index fe301c8..5055212 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -39,8 +39,9 @@ residualsvlm <- function(object,
names(ans) <- names(object at residuals)
ans
} else {
- wz.sqrt <- matrix.power(wz, M=M, power=0.5, fast=TRUE)
- ans <- mux22(wz.sqrt, object at residuals, M=M, upper=FALSE)
+ wz.sqrt <- matrix.power(wz, M = M, power = 0.5, fast = TRUE)
+ ans <- mux22(wz.sqrt, object at residuals,
+ M = M, upper = FALSE)
dim(ans) <- c(M, n)
ans <- t(ans)
dimnames(ans) <- dimnames(object at residuals) # n x M
@@ -102,7 +103,8 @@ residualsvglm <- function(object,
ans
} else {
wz.sqrt <- matrix.power(wz, M=M, power=0.5, fast=TRUE)
- ans <- mux22(wz.sqrt, object at residuals, M=M, upper=FALSE)
+ ans <- mux22(wz.sqrt, object at residuals,
+ M = M, upper = FALSE)
dim(ans) <- c(M,n)
ans <- t(ans)
dimnames(ans) <- dimnames(object at residuals) # n x M
diff --git a/R/rrvglm.R b/R/rrvglm.R
index 2e7a3db..34e9341 100644
--- a/R/rrvglm.R
+++ b/R/rrvglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -33,7 +33,7 @@ rrvglm <- function(formula,
if (missing(data))
data <- environment(formula)
- mf <- match.call(expand = FALSE)
+ mf <- match.call(expand.dots = FALSE)
mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
mf$coefstart <- mf$etastart <- mf$... <- NULL
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index a36dbf7..e207d70 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,24 +7,24 @@
rrvglm.control = function(Rank = 1,
Algorithm = c("alternating", "derivative"),
- Corner=TRUE,
- Uncorrelated.lv=FALSE,
- Wmat=NULL,
- Svd.arg=FALSE,
+ Corner = TRUE,
+ Uncorrelated.lv = FALSE,
+ Wmat = NULL,
+ Svd.arg = FALSE,
Index.corner = if (length(szero))
head((1:1000)[-szero], Rank) else 1:Rank,
- Ainit=NULL,
- Alpha=0.5,
+ Ainit = NULL,
+ Alpha = 0.5,
Bestof = 1,
- Cinit=NULL,
+ Cinit = NULL,
Etamat.colmax = 10,
SD.Ainit = 0.02,
SD.Cinit = 0.02,
szero = NULL,
Norrr = ~ 1,
trace = FALSE,
- Use.Init.Poisson.QO=FALSE,
- checkwz=TRUE,
+ Use.Init.Poisson.QO = FALSE,
+ checkwz = TRUE,
wzepsilon = .Machine$double.eps^0.75,
...)
{
@@ -38,23 +38,30 @@ rrvglm.control = function(Rank = 1,
if (Svd.arg) Corner = FALSE
- if (!is.Numeric(Rank, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(Rank, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'Rank'")
- if (!is.Numeric(Alpha, posit=TRUE, allow=1) || Alpha > 1)
+ if (!is.Numeric(Alpha, positive = TRUE,
+ allowable.length = 1) || Alpha > 1)
stop("bad input for 'Alpha'")
- if (!is.Numeric(Bestof, posit=TRUE, allow=1, integer=TRUE))
+ if (!is.Numeric(Bestof, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'Bestof'")
- if (!is.Numeric(SD.Ainit, posit=TRUE, allow=1))
+ if (!is.Numeric(SD.Ainit, positive = TRUE,
+ allowable.length = 1))
stop("bad input for 'SD.Ainit'")
- if (!is.Numeric(SD.Cinit, posit=TRUE, allow=1))
+ if (!is.Numeric(SD.Cinit, positive = TRUE,
+ allowable.length = 1))
stop("bad input for 'SD.Cinit'")
- if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) ||
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
Etamat.colmax < Rank)
stop("bad input for 'Etamat.colmax'")
- if (length(szero) && (any(round(szero) != szero)
- || any(szero<1)))
- stop("bad input for the argument 'szero'")
+ if (length(szero) &&
+ (any(round(szero) != szero) ||
+ any(szero < 1)))
+ stop("bad input for the argument 'szero'")
Quadratic = FALSE
@@ -69,7 +76,7 @@ rrvglm.control = function(Rank = 1,
stop("Quadratic model can only be fitted using the derivative algorithm")
if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
- stop("cannot have Corner=TRUE and either Svd=TRUE or Uncorrelated.lv=TRUE or Wmat")
+ stop("cannot have Corner = TRUE and either Svd = TRUE or Uncorrelated.lv = TRUE or Wmat")
if (Corner && length(intersect(szero, Index.corner)))
stop("cannot have szero and Index.corner having common values")
@@ -79,7 +86,7 @@ rrvglm.control = function(Rank = 1,
if (!is.logical(checkwz) || length(checkwz) != 1)
stop("bad input for 'checkwz'")
- if (!is.Numeric(wzepsilon, allow=1, positive=TRUE))
+ if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
stop("bad input for 'wzepsilon'")
if (class(Norrr) != "formula" && !is.null(Norrr))
@@ -89,17 +96,16 @@ rrvglm.control = function(Rank = 1,
c(vglm.control(trace = trace, ...),
switch(Algorithm,
"alternating" = valt.control(...),
- "derivative" = if (is.R()) rrvglm.optim.control(...) else
- nlminbcontrol(...)),
- list(Rank=Rank,
- Ainit=Ainit,
- Algorithm=Algorithm,
- Alpha=Alpha,
+ "derivative" = rrvglm.optim.control(...)),
+ list(Rank = Rank,
+ Ainit = Ainit,
+ Algorithm = Algorithm,
+ Alpha = Alpha,
Bestof = Bestof,
- Cinit=Cinit,
- Index.corner=Index.corner,
- Norrr=Norrr,
- Corner=Corner, Uncorrelated.lv=Uncorrelated.lv, Wmat=Wmat,
+ Cinit = Cinit,
+ Index.corner = Index.corner,
+ Norrr = Norrr,
+ Corner = Corner, Uncorrelated.lv = Uncorrelated.lv, Wmat = Wmat,
OptimizeWrtC = TRUE, # OptimizeWrtC,
Quadratic = FALSE, # A constant now, here.
SD.Ainit = SD.Ainit,
@@ -107,10 +113,10 @@ rrvglm.control = function(Rank = 1,
Etamat.colmax = Etamat.colmax,
szero = szero,
Svd.arg=Svd.arg,
- Use.Init.Poisson.QO=Use.Init.Poisson.QO),
- checkwz=checkwz,
+ Use.Init.Poisson.QO = Use.Init.Poisson.QO),
+ checkwz = checkwz,
wzepsilon = wzepsilon,
- if (Quadratic) qrrvglm.control(Rank=Rank, ...) else NULL)
+ if (Quadratic) qrrvglm.control(Rank = Rank, ...) else NULL)
if (Quadratic && ans$ITolerances) {
ans$Svd.arg = FALSE
@@ -123,14 +129,18 @@ rrvglm.control = function(Rank = 1,
}
+
+
+
+
setClass("summary.rrvglm",
representation("rrvglm",
- coef3="matrix",
- cov.unscaled="matrix",
- correlation="matrix",
- df="numeric",
- pearson.resid="matrix",
- sigma="numeric"))
+ coef3 = "matrix",
+ cov.unscaled = "matrix",
+ correlation = "matrix",
+ df = "numeric",
+ pearson.resid = "matrix",
+ sigma = "numeric"))
setMethod("summary", "rrvglm",
function(object, ...)
@@ -139,22 +149,28 @@ setMethod("summary", "rrvglm",
-printsummary.rrvglm <- function(x, digits=NULL, quote= TRUE, prefix="")
+show.summary.rrvglm <- function(x, digits = NULL,
+ quote= TRUE, prefix = "")
{
- printsummary.vglm(x, digits = digits, quote = quote, prefix = prefix)
+ show.summary.vglm(x, digits = digits, quote = quote, prefix = prefix)
invisible(x)
+ NULL
}
-setMethod("print", "summary.rrvglm",
- function(x, ...)
- printsummary.rrvglm(x=x, ...))
- setMethod("show", "summary.rrvglm",
- function(object)
- printsummary.rrvglm(x=object))
+
+
+ setMethod("show", "summary.rrvglm",
+ function(object)
+ show.summary.rrvglm(x = object))
+
+
+
+
+
diff --git a/R/rrvglm.fit.q b/R/rrvglm.fit.q
index 3bf9e8d..2e8176b 100644
--- a/R/rrvglm.fit.q
+++ b/R/rrvglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -152,12 +152,13 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
deriv.mu <- eval(family at deriv)
wz <- eval(family at weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
- wz = matrix(wz, nrow=n)
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ wz = matrix(wz, nrow = n)
+ U <- vchol(wz, M = M, n = n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
z = eta + vbacksub(U, tvfor, M, n) - offset # Contains \bI \bnu
rrr.expression = paste("rrr", control$Algorithm,
@@ -371,11 +372,12 @@ rrvglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
wz <- eval(family at weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+ U <- vchol(wz, M = M, n = n, silent = !trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
c.list <- list(z=as.double(z), fit=as.double(t(eta)), one.more = TRUE,
coeff=as.double(rep(1,ncol(X_vlm_save))), U=as.double(U),
diff --git a/R/s.q b/R/s.q
index 875a6e1..3434900 100644
--- a/R/s.q
+++ b/R/s.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/s.vam.q b/R/s.vam.q
index ccd71f9..c5d6356 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/smart.R b/R/smart.R
index 99d9313..32a8445 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/step.vglm.q b/R/step.vglm.q
index f6e809d..dd78967 100644
--- a/R/step.vglm.q
+++ b/R/step.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/summary.vgam.q b/R/summary.vgam.q
index 6c68720..fa329fd 100644
--- a/R/summary.vgam.q
+++ b/R/summary.vgam.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -53,8 +53,9 @@ summaryvgam <- function(object, dispersion = NULL,
snames <- names(nldf)
aod[snames, 2] <- round(nldf, 1)
aod[snames, 3] <- if (useF) nl.chisq/nldf else nl.chisq
- aod[snames, 4] <- if (useF) pf(nl.chisq/nldf, nldf, rdf, lower.tail=FALSE) else
- pchisq(nl.chisq, nldf, lower.tail=FALSE)
+ aod[snames, 4] <- if (useF)
+ pf(nl.chisq / nldf, nldf, rdf, lower.tail = FALSE) else
+ pchisq(nl.chisq, nldf, lower.tail = FALSE)
if (any(special)) {
aod[snames[special], 2:4] = NA
@@ -102,7 +103,7 @@ summaryvgam <- function(object, dispersion = NULL,
-printsummary.vgam <- function(x, quote = TRUE, prefix = "",
+show.summary.vgam <- function(x, quote = TRUE, prefix = "",
digits = options()$digits-2) {
M <- x at misc$M
@@ -113,16 +114,17 @@ printsummary.vgam <- function(x, quote = TRUE, prefix = "",
presid <- x at pearson.resid
rdf <- x at df[2]
- if (FALSE && !is.null(presid) && all(!is.na(presid))) {
+ if (FALSE &&
+ !is.null(presid) && all(!is.na(presid))) {
cat("\nPearson Residuals:\n")
if (rdf/M > 5) {
rq <- apply(as.matrix(presid), 2, quantile) # 5 x M
dimnames(rq) <- list(c("Min", "1Q", "Median", "3Q", "Max"),
x at misc$predictors.names)
- print(t(rq), digits=digits)
+ print(t(rq), digits = digits)
} else
if (rdf > 0) {
- print(presid, digits=digits)
+ print(presid, digits = digits)
}
}
@@ -132,7 +134,7 @@ printsummary.vgam <- function(x, quote = TRUE, prefix = "",
if (M == 1)
cat("\nName of linear predictor:",
paste(x at misc$predictors.names, collapse = ", "), "\n") else
- if (M<=5)
+ if (M <= 5)
cat("\nNames of linear predictors:",
paste(x at misc$predictors.names, collapse = ", "), "\n")
@@ -157,7 +159,7 @@ printsummary.vgam <- function(x, quote = TRUE, prefix = "",
}
if (length(deviance(x)))
- cat("\nResidual Deviance: ", format(round(deviance(x), digits)),
+ cat("\nResidual deviance: ", format(round(deviance(x), digits)),
"on", format(round(rdf, 3)), "degrees of freedom\n")
if (length(logLik.vlm(x)))
@@ -172,10 +174,10 @@ printsummary.vgam <- function(x, quote = TRUE, prefix = "",
}
- cat("\nNumber of Iterations: ", x at iter, "\n")
+ cat("\nNumber of iterations: ", x at iter, "\n")
if (length(x at anova)) {
- printvanova(x at anova, dig = digits) # ".vanova" for Splus6
+ show.vanova(x at anova, digits = digits) # ".vanova" for Splus6
}
invisible(NULL)
@@ -188,20 +190,17 @@ printsummary.vgam <- function(x, quote = TRUE, prefix = "",
function(object, ...)
summaryvgam(object, ...))
- setMethod("print", "summary.vgam",
- function(x, ...)
- printsummary.vgam(x, ...))
setMethod("show", "summary.vgam",
function(object)
- printsummary.vgam(object))
+ show.summary.vgam(object))
-printvanova <- function(x, digits=.Options$digits, ...) {
+show.vanova <- function(x, digits = .Options$digits, ...) {
rrr <- row.names(x)
heading <- attr(x, "heading")
if (!is.null(heading))
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index cf8a01a..678b4fc 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -59,12 +59,15 @@ summaryvglm <- function(object, correlation = FALSE,
+
setMethod("logLik", "summary.vglm", function(object, ...)
logLik.vlm(object, ...))
-printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
- presid = TRUE) {
+show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
+ prefix = "",
+ presid = TRUE,
+ nopredictors = FALSE) {
M <- x at misc$M
coef <- x at coef3 # icients
@@ -98,13 +101,16 @@ printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
cat("\nNumber of linear predictors: ", M, "\n")
- if (!is.null(x at misc$predictors.names))
- if (M == 1)
- cat("\nName of linear predictor:",
- paste(x at misc$predictors.names, collapse = ", "), "\n") else
- if (M <= 5)
- cat("\nNames of linear predictors:",
- paste(x at misc$predictors.names, collapse = ", "), fill = TRUE)
+ if (!is.null(x at misc$predictors.names) && !nopredictors) {
+ if (M == 1) {
+ cat("\nName of linear predictor:",
+ paste(x at misc$predictors.names, collapse = ", "), "\n")
+ } else
+ if (M <= 5) {
+ cat("\nNames of linear predictors:",
+ paste(x at misc$predictors.names, collapse = ", "), fill = TRUE)
+ }
+ }
prose <- ""
if (length(x at dispersion)) {
@@ -127,7 +133,7 @@ printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
if (length(deviance(x))) {
- cat("\nResidual Deviance:", yformat(deviance(x), digits))
+ cat("\nResidual deviance:", yformat(deviance(x), digits))
if (is.finite(rdf))
cat(" on", round(rdf, digits), "degrees of freedom\n") else
cat("\n")
@@ -151,7 +157,7 @@ printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
}
- cat("\nNumber of Iterations:", format(trunc(x at iter)), "\n")
+ cat("\nNumber of iterations:", format(trunc(x at iter)), "\n")
if (!is.null(correl)) {
ncol_X_vlm <- dim(correl)[2]
@@ -173,13 +179,15 @@ printsummary.vglm <- function(x, digits = NULL, quote = TRUE, prefix = "",
function(object, ...)
summaryvglm(object, ...))
- setMethod("print", "summary.vglm",
- function(x, ...)
- invisible(printsummary.vglm(x, ...)))
+
+
+
setMethod("show", "summary.vglm",
function(object)
- invisible(printsummary.vglm(object)))
+ show.summary.vglm(object))
+
+
@@ -197,7 +205,7 @@ vcovdefault <- function(object, ...) {
vcovvlm <- function(object, dispersion = NULL, untransform = FALSE) {
- so <- summaryvlm(object, corr = FALSE, dispersion = dispersion)
+ so <- summaryvlm(object, correlation = FALSE, dispersion = dispersion)
d = if (any(slotNames(so) == "dispersion") &&
is.Numeric(so at dispersion)) so at dispersion else 1
answer = d * so at cov.unscaled
@@ -256,10 +264,13 @@ vcovdefault <- function(object, ...) {
+
+
setMethod("vcov", "vlm",
function(object, ...)
vcovvlm(object, ...))
+
setMethod("vcov", "vglm",
function(object, ...)
vcovvlm(object, ...))
@@ -268,3 +279,5 @@ setMethod("vcov", "vglm",
+
+
diff --git a/R/summary.vlm.q b/R/summary.vlm.q
index a6253de..52310d0 100644
--- a/R/summary.vlm.q
+++ b/R/summary.vlm.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -13,103 +13,103 @@
summaryvlm <- function(object, correlation = FALSE, dispersion = NULL) {
- if (is.logical(object at misc$BFGS) && object at misc$BFGS)
- warning(paste("the estimated variance-covariance matrix is",
- "usually inaccurate as the working weight matrices are a",
- "crude BFGS quasi-Newton approximation"))
-
- M <- object at misc$M
- n <- object at misc$n
- nrow_X_vlm <- object at misc$nrow_X_vlm
- ncol_X_vlm <- object at misc$ncol_X_vlm # May be NULL for CQO objects
-
- coef <- object at coefficients
- cnames <- names(coef)
- presid = residualsvlm(object, type = "pearson") # NULL if pooled.weight
-
- if (any(is.na(coef))) {
- warning(paste("Some NAs in the coefficients---no summary",
- " provided; returning object\n"))
- return(object)
- }
- rdf <- object at df.residual
-
- if (!length(dispersion)) {
- if (is.numeric(object at misc$dispersion)) {
- dispersion <- object at misc$dispersion
- if (all(dispersion==0))
- stop("dispersion shouldn't be zero here!")
- } else {
- dispersion <- 1
- object at misc$estimated.dispersion <- FALSE
- }
- } else if (dispersion==0) {
- dispersion <- if (!length(object at rss)) {
- stop("object at rss is empty")
- } else {
- object at rss / object at df.residual
- }
- object at misc$estimated.dispersion <- TRUE
- } else {
- if (is.numeric(object at misc$dispersion) &&
- object at misc$dispersion != dispersion)
- warning("overriding the value of object at misc$dispersion")
- object at misc$estimated.dispersion <- FALSE
- }
- sigma <- dispersion^0.5 # Can be a vector
-
- if (is.Numeric(ncol_X_vlm)) {
- R <- object at R
-
- if (ncol_X_vlm < max(dim(R)))
- stop("R is rank deficient")
-
- rinv = diag(ncol_X_vlm)
- rinv = backsolve(R, rinv)
- rowlen = drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
- covun = rinv %*% t(rinv)
- dimnames(covun) <- list(cnames, cnames)
- }
- coef <- matrix(rep(coef, 3), ncol=3)
- dimnames(coef) <- list(cnames, c("Value", "Std. Error", "t value"))
- if (length(sigma) == 1 && is.Numeric(ncol_X_vlm)) {
- coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
- coef[, 3] <- coef[, 1] / coef[, 2]
- } else {
- coef[,1] = coef[,2] = coef[,3] = NA
- }
- if (correlation) {
- correl <- covun * outer(1 / rowlen, 1 / rowlen)
- dimnames(correl) <- list(cnames, cnames)
- } else {
- correl <- matrix(0, 0, 0) # was NULL, but now a special matrix
- }
-
-
-
-
- answer <-
- new("summary.vlm",
- object,
- coef3 = coef,
- correlation = correl,
- df = c(ncol_X_vlm, rdf),
- sigma = sigma)
-
- if (is.Numeric(ncol_X_vlm)) answer at cov.unscaled = covun
- answer at dispersion = dispersion # Overwrite this
-
- if (length(presid))
- answer at pearson.resid = as.matrix(presid)
-
-
- answer
+ if (is.logical(object at misc$BFGS) && object at misc$BFGS)
+ warning(paste("the estimated variance-covariance matrix is",
+ "usually inaccurate as the working weight matrices are a",
+ "crude BFGS quasi-Newton approximation"))
+
+ M <- object at misc$M
+ n <- object at misc$n
+ nrow_X_vlm <- object at misc$nrow_X_vlm
+ ncol_X_vlm <- object at misc$ncol_X_vlm # May be NULL for CQO objects
+
+ coef <- object at coefficients
+ cnames <- names(coef)
+ presid = residualsvlm(object, type = "pearson") # NULL if pooled.weight
+
+ if (any(is.na(coef))) {
+ warning(paste("Some NAs in the coefficients---no summary",
+ " provided; returning object\n"))
+ return(object)
+ }
+ rdf <- object at df.residual
+
+ if (!length(dispersion)) {
+ if (is.numeric(object at misc$dispersion)) {
+ dispersion <- object at misc$dispersion
+ if (all(dispersion == 0))
+ stop("dispersion shouldn't be zero here!")
+ } else {
+ dispersion <- 1
+ object at misc$estimated.dispersion <- FALSE
+ }
+ } else if (dispersion == 0) {
+ dispersion <- if (!length(object at rss)) {
+ stop("object at rss is empty")
+ } else {
+ object at rss / object at df.residual
+ }
+ object at misc$estimated.dispersion <- TRUE
+ } else {
+ if (is.numeric(object at misc$dispersion) &&
+ object at misc$dispersion != dispersion)
+ warning("overriding the value of object at misc$dispersion")
+ object at misc$estimated.dispersion <- FALSE
+ }
+ sigma <- dispersion^0.5 # Can be a vector
+
+ if (is.Numeric(ncol_X_vlm)) {
+ R <- object at R
+
+ if (ncol_X_vlm < max(dim(R)))
+ stop("R is rank deficient")
+
+ rinv = diag(ncol_X_vlm)
+ rinv = backsolve(R, rinv)
+ rowlen = drop(((rinv^2) %*% rep(1, ncol_X_vlm))^0.5)
+ covun = rinv %*% t(rinv)
+ dimnames(covun) <- list(cnames, cnames)
+ }
+ coef <- matrix(rep(coef, 3), ncol=3)
+ dimnames(coef) <- list(cnames, c("Estimate", "Std. Error", "z value"))
+ if (length(sigma) == 1 && is.Numeric(ncol_X_vlm)) {
+ coef[, 2] <- rowlen %o% sigma # Fails here when sigma is a vector
+ coef[, 3] <- coef[, 1] / coef[, 2]
+ } else {
+ coef[,1] = coef[,2] = coef[,3] = NA
+ }
+ if (correlation) {
+ correl <- covun * outer(1 / rowlen, 1 / rowlen)
+ dimnames(correl) <- list(cnames, cnames)
+ } else {
+ correl <- matrix(0, 0, 0) # was NULL, but now a special matrix
+ }
+
+
+
+
+ answer <-
+ new("summary.vlm",
+ object,
+ coef3 = coef,
+ correlation = correl,
+ df = c(ncol_X_vlm, rdf),
+ sigma = sigma)
+
+ if (is.Numeric(ncol_X_vlm)) answer at cov.unscaled = covun
+ answer at dispersion = dispersion # Overwrite this
+
+ if (length(presid))
+ answer at pearson.resid = as.matrix(presid)
+
+
+ answer
}
-printsummary.vlm <- function(x, digits = NULL, quote = TRUE,
+show.summary.vlm <- function(x, digits = NULL, quote = TRUE,
prefix = "") {
@@ -187,14 +187,12 @@ printsummary.vlm <- function(x, digits = NULL, quote = TRUE,
function(object, ...)
summaryvlm(object, ...))
- setMethod("print", "summary.vlm",
- function(x, ...)
- invisible(printsummary.vlm(x, ...)))
+
setMethod("show", "summary.vlm",
function(object)
- invisible(printsummary.vlm(object)))
+ show.summary.vlm(object))
diff --git a/R/uqo.R b/R/uqo.R
index 403af66..298b8c7 100644
--- a/R/uqo.R
+++ b/R/uqo.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,16 +7,16 @@
-uqo.control = function(Rank=1,
+uqo.control = function(Rank = 1,
Bestof = if (length(lvstart) && !jitter.sitescores) 1 else 10,
CA1 = FALSE,
Crow1positive = TRUE,
epsilon = 1.0e-07,
EqualTolerances = ITolerances,
Etamat.colmax = 10,
- GradientFunction=TRUE,
+ GradientFunction = TRUE,
Hstep = 0.001,
- isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ isdlv = rep(c(2, 1, rep(0.5, len = Rank)), len = Rank),
ITolerances = FALSE,
lvstart = NULL,
jitter.sitescores = FALSE,
@@ -28,60 +28,71 @@ uqo.control = function(Rank=1,
SD.sitescores = 1.0,
SmallNo = 5.0e-13,
trace = TRUE,
- Use.Init.Poisson.QO=TRUE,
+ Use.Init.Poisson.QO = TRUE,
...)
{
Kinit = 0.001
- if (!is.Numeric(MUXfactor, posit=TRUE))
- stop("bad input for \"MUXfactor\"")
- if (any(MUXfactor < 1 | MUXfactor > 10))
- stop("MUXfactor values must lie between 1 and 10")
- if (!is.Numeric(isdlv, posit=TRUE)) stop("bad input for \"isdlv\"")
+ if (!is.Numeric(MUXfactor, positive = TRUE))
+ stop("bad input for \"MUXfactor\"")
+ if (any(MUXfactor < 1 |
+ MUXfactor > 10))
+ stop("MUXfactor values must lie between 1 and 10")
+ if (!is.Numeric(isdlv, positive = TRUE))
+ stop("bad input for \"isdlv\"")
if (any(isdlv < 0.2 | isdlv > 10))
- stop("isdlv values must lie between 0.2 and 10")
+ stop("isdlv values must lie between 0.2 and 10")
if (length(isdlv) > 1 && any(diff(isdlv) > 0))
- stop("successive isdlv values must not increase")
- if (!is.Numeric(Rank, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"Rank\"")
- if (!is.Numeric(Bestof, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"Bestof\"")
- if (!is.Numeric(Etamat.colmax, posit=TRUE, allow=1) || Etamat.colmax < Rank)
- stop("bad input for \"Etamat.colmax\"")
- if (!is.Numeric(maxitl, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"maxitl\"")
- if (!is.Numeric(Maxit.optim, integ=TRUE, posit=TRUE, allow=1))
- stop("Bad input for \"Maxit.optim\"")
- if (!is.Numeric(optim.maxit, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"optim.maxit\"")
- if (!is.Numeric(nRmax, allow=1, integ=TRUE, posit=TRUE))
- stop("Bad input for \"nRmax\"")
- if (!is.Numeric(Hstep, allow=1, posit=TRUE))
- stop("Bad input for \"Hstep\"")
- if (!is.Numeric(epsilon, allow=1, posit=TRUE))
- stop("Bad input for \"epsilon\"")
- if (!is.Numeric(SmallNo, allow=1, posit=TRUE))
- stop("Bad input for \"SmallNo\"")
+ stop("successive isdlv values must not increase")
+
+ if (!is.Numeric(Rank, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for \"Rank\"")
+ if (!is.Numeric(Bestof, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for \"Bestof\"")
+ if (!is.Numeric(Etamat.colmax, positive = TRUE,
+ allowable.length = 1) ||
+ Etamat.colmax < Rank)
+ stop("bad input for \"Etamat.colmax\"")
+ if (!is.Numeric(maxitl, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for \"maxitl\"")
+ if (!is.Numeric(Maxit.optim, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("Bad input for \"Maxit.optim\"")
+ if (!is.Numeric(optim.maxit, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for \"optim.maxit\"")
+ if (!is.Numeric(nRmax, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("Bad input for \"nRmax\"")
+ if (!is.Numeric(Hstep, allowable.length = 1, positive = TRUE))
+ stop("Bad input for \"Hstep\"")
+ if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE))
+ stop("Bad input for \"epsilon\"")
+ if (!is.Numeric(SmallNo, allowable.length = 1, positive = TRUE))
+ stop("Bad input for \"SmallNo\"")
if ((SmallNo < .Machine$double.eps) || (SmallNo > .0001))
- stop("SmallNo is out of range")
+ stop("SmallNo is out of range")
if (Use.Init.Poisson.QO && CA1)
- stop("cannot have both Use.Init.Poisson.QO=TRUE and CA1=TRUE")
+ stop("cannot have both 'Use.Init.Poisson.QO = TRUE' and 'CA1 = TRUE'")
ans = list(
Bestof = Bestof,
CA1 = CA1,
ConstrainedQO = FALSE, # A constant, not a control parameter
Corner = FALSE, # Needed for valt.1iter()
- Crow1positive=as.logical(rep(Crow1positive, len=Rank)),
+ Crow1positive=as.logical(rep(Crow1positive, len = Rank)),
epsilon = epsilon,
EqualTolerances = as.logical(EqualTolerances)[1],
Etamat.colmax = Etamat.colmax,
FastAlgorithm = TRUE, # A constant, not a control parameter
GradientFunction = GradientFunction,
Hstep = Hstep,
- isdlv = rep(isdlv, len=Rank),
+ isdlv = rep(isdlv, len = Rank),
ITolerances = as.logical(ITolerances)[1],
lvstart = lvstart,
jitter.sitescores = as.logical(jitter.sitescores),
@@ -106,16 +117,16 @@ uqo.control = function(Rank=1,
uqo <- function(formula,
family, data=list(),
- weights=NULL, subset=NULL, na.action=na.fail,
- etastart=NULL, mustart=NULL, coefstart=NULL,
- control=uqo.control(...),
- offset=NULL,
- method="uqo.fit",
- model=FALSE, x.arg=TRUE, y.arg=TRUE,
- contrasts=NULL,
- constraints=NULL,
- extra=NULL,
- qr.arg=FALSE, ...)
+ weights = NULL, subset = NULL, na.action = na.fail,
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ control = uqo.control(...),
+ offset = NULL,
+ method = "uqo.fit",
+ model = FALSE, x.arg = TRUE, y.arg = TRUE,
+ contrasts = NULL,
+ constraints = NULL,
+ extra = NULL,
+ qr.arg = FALSE, ...)
{
dataname <- as.character(substitute(data)) # "list" if no data=
function.name <- "uqo"
@@ -126,7 +137,7 @@ uqo <- function(formula,
if (missing(data))
data <- environment(formula)
- mf <- match.call(expand=FALSE)
+ mf <- match.call(expand.dots = FALSE)
mf$family <- mf$method <- mf$model <- mf$x.arg <- mf$y.arg <- mf$control <-
mf$contrasts <- mf$constraints <- mf$extra <- mf$qr.arg <- NULL
mf$coefstart <- mf$etastart <- mf$... <- NULL
@@ -154,7 +165,7 @@ uqo <- function(formula,
w <- model.weights(mf)
if (!length(w))
w <- rep(1, nrow(mf))
- else if (ncol(as.matrix(w))==1 && any(w < 0))
+ else if (ncol(as.matrix(w))== 1 && any(w < 0))
stop("negative weights not allowed")
if (is.character(family))
@@ -162,7 +173,7 @@ uqo <- function(formula,
if (is.function(family))
family <- family()
if (!inherits(family, "vglmff")) {
- stop("'family=", family, "' is not a VGAM family function")
+ stop("'family = ", family, "' is not a VGAM family function")
}
if (!is.null(family at first))
@@ -176,19 +187,19 @@ uqo <- function(formula,
length(as.list(family at deviance)) <= 1)
stop("The fast algorithm requires the family ",
"function to have a deviance slot")
- deviance.Bestof = rep(as.numeric(NA), len=control$Bestof)
+ deviance.Bestof = rep(as.numeric(NA), len = control$Bestof)
for(tries in 1:control$Bestof) {
if (control$trace && (control$Bestof>1))
cat(paste("\n========================= Fitting model", tries,
"=========================\n"))
- it <- uqo.fitter(x=x, y=y, w=w, offset=offset,
- etastart=etastart, mustart=mustart, coefstart=coefstart,
- family=family, control=control,
- constraints=constraints, extra=extra,
- qr.arg = qr.arg, Terms=mt, function.name=function.name,
- ca1 = control$CA1 && tries==1, ...)
+ it <- uqo.fitter(x = x, y = y, w = w, offset = offset,
+ etastart = etastart, mustart = mustart, coefstart = coefstart,
+ family = family, control = control,
+ constraints = constraints, extra = extra,
+ qr.arg = qr.arg, Terms = mt, function.name = function.name,
+ ca1 = control$CA1 && tries == 1, ...)
deviance.Bestof[tries] = it$crit.list$deviance
- if (tries==1||min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
+ if (tries == 1||min(deviance.Bestof[1:(tries-1)]) > deviance.Bestof[tries])
fit = it
}
fit$misc$deviance.Bestof = deviance.Bestof
@@ -241,10 +252,10 @@ calluqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
Rank = control$Rank
itol = othint[14]
inited = if (is.R()) {
- as.numeric(existsinVGAMenv("etamat", prefix=".VGAM.UQO."))
+ as.numeric(existsinVGAMenv("etamat", prefix = ".VGAM.UQO."))
} else 0
othint[5] = inited # Replacement
- usethiseta = if (inited==1)
+ usethiseta = if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
usethisbeta = double(othint[13])
pstar = othint[3]
@@ -265,8 +276,8 @@ calluqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
sdnumat = apply(numat, 2, sd)
for(lookat in 1:Rank)
if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
- muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
- sdnumat[lookat]
+ muxer = control$isdlv[lookat] *
+ control$MUXfactor[lookat] / sdnumat[lookat]
numat[,lookat] = numat[,lookat] * muxer
if (control$trace) {
}
@@ -297,15 +308,15 @@ calluqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
othdbl=as.double(othdbl))
if (ans1$errcode == 0) {
- assign2VGAMenv(c("etamat","numat"), ans1, prefix=".VGAM.UQO.")
+ assign2VGAMenv(c("etamat","numat"), ans1, prefix = ".VGAM.UQO.")
if (alldump) {
- ans1$fv = matrix(ans1$fv,n,M,byrow=TRUE,dimnames=dimnames(ymat))
- assign2VGAMenv(c("beta","fv"), ans1, prefix=".VGAM.UQO.")
- assign2VGAMenv(c("z","U"), ans1, prefix=".VGAM.UQO.")
+ ans1$fv = matrix(ans1$fv,n,M,byrow = TRUE,dimnames=dimnames(ymat))
+ assign2VGAMenv(c("beta","fv"), ans1, prefix = ".VGAM.UQO.")
+ assign2VGAMenv(c("z","U"), ans1, prefix = ".VGAM.UQO.")
}
} else {
- cat("warning in calluqof: error code =", ans1$errcode, "\n")
- rmfromVGAMenv(c("etamat"), prefix=".VGAM.UQO.")
+ cat("warning in calluqof: error code = ", ans1$errcode, "\n")
+ rmfromVGAMenv(c("etamat"), prefix = ".VGAM.UQO.")
}
ans1$deviance
}
@@ -319,7 +330,7 @@ callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
if (exists(".VGAM.UQO.etamat", envir = VGAM:::VGAMenv)) 1 else 0
} else 0 # 0 means fortran initializes the etamat
othint[5] = inited # Replacement
- usethiseta = if (inited==1)
+ usethiseta = if (inited == 1)
getfromVGAMenv("etamat", prefix = ".VGAM.UQO.") else t(etamat)
usethisbeta = double(othint[13])
pstar = othint[3]
@@ -338,13 +349,13 @@ callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
sdnumat = apply(numat, 2, sd)
for(lookat in 1:Rank)
- if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
- muxer = control$isdlv[lookat] * control$MUXfactor[lookat] /
- sdnumat[lookat]
+ if (sdnumat[lookat]>control$MUXfactor[lookat]*control$isdlv[lookat]){
+ muxer = control$isdlv[lookat] *
+ control$MUXfactor[lookat] / sdnumat[lookat]
numat[,lookat] = numat[,lookat] * muxer
if (control$trace) {
}
- }
+ }
} else {
numat = matrix(sitescores, ncol=Rank)
evnu = eigen(var(numat))
@@ -375,10 +386,10 @@ callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
betasave=usethisbeta)
if (ans1$errcode == 0) {
- assign2VGAMenv(c("etamat"), ans1, prefix=".VGAM.UQO.")
+ assign2VGAMenv(c("etamat"), ans1, prefix = ".VGAM.UQO.")
} else {
- cat("warning in callduqof: error code =", ans1$errcode, "\n")
- rmfromVGAMenv(c("etamat"), prefix=".VGAM.UQO.")
+ cat("warning in callduqof: error code = ", ans1$errcode, "\n")
+ rmfromVGAMenv(c("etamat"), prefix = ".VGAM.UQO.")
}
ans1$deriv
}
@@ -386,11 +397,11 @@ callduqof = function(sitescores, etamat, ymat, wvec, modelno, nice31, xmat,
-uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
- etastart=NULL, mustart=NULL, coefstart=NULL,
- offset=0, family, control=uqo.control(...),
- qr.arg=FALSE, constraints=NULL, extra=NULL,
- Terms=Terms, function.name="uqo", ca1=TRUE, ...)
+uqo.fit <- function(x, y, w = rep(1, len = nrow(x)),
+ etastart = NULL, mustart = NULL, coefstart = NULL,
+ offset = 0, family, control = uqo.control(...),
+ qr.arg = FALSE, constraints = NULL, extra = NULL,
+ Terms=Terms, function.name = "uqo", ca1 = TRUE, ...)
{
if (!all(offset == 0)) stop("cqo.fit() cannot handle offsets")
nonparametric <- FALSE
@@ -466,7 +477,7 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
} else {
if (rrcontrol$Use.Init.Poisson) {
.Init.Poisson.QO(ymat=as.matrix(y),
- X1=x, X2=NULL,
+ X1=x, X2 = NULL,
Rank=rrcontrol$Rank, trace=rrcontrol$trace,
max.ncol.etamat = rrcontrol$Etamat.colmax,
Crow1positive=rrcontrol$Crow1positive,
@@ -493,17 +504,17 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
modelno = switch(family at vfamily[1], "poissonff"=2,
- "binomialff"=1, "quasipoissonff"=0, "quasibinomialff"=0,
- "negbinomial"=0,
+ "binomialff" = 1, "quasipoissonff" = 0, "quasibinomialff" = 0,
+ "negbinomial" = 0,
"gamma2"=5,
0) # stop("can't fit this model using fast algorithm")
if (!modelno) stop("the family function does not work with uqo()")
if (modelno == 1) modelno = get("modelno", envir = VGAM:::VGAMenv)
- rmfromVGAMenv(c("etamat", "beta"), prefix=".VGAM.UQO.")
+ rmfromVGAMenv(c("etamat", "beta"), prefix = ".VGAM.UQO.")
cqofastok = if (is.R()) (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv) &&
get("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)) else
- (exists("CQO.FastAlgorithm", inherits=TRUE) && CQO.FastAlgorithm)
+ (exists("CQO.FastAlgorithm", inherits = TRUE) && CQO.FastAlgorithm)
if (!cqofastok)
stop("can't fit this model using fast algorithm")
@@ -529,19 +540,19 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
maxMr5 = maxMr*(maxMr+1)/2
lenbeta = pstar * ifelse(nice31, NOS, 1)
- othint = c(Rank, control$EqualTol, pstar, dimw=1, inited=290, # other ints
- modelno, maxitl=control$maxitl, actnits=0, twice=0, p1star,
+ othint = c(Rank, control$EqualTol, pstar, dimw = 1, inited=290, # other ints
+ modelno, maxitl=control$maxitl, actnits = 0, twice = 0, p1star,
p2star, nice31, lenbeta, control$ITolerances, control$trace,
p1, p2, control$imethod)
othdbl = c(small=control$SmallNo, fseps=control$epsilon,
.Machine$double.eps,
- kinit=rep(control$Kinit, len=NOS),
- shapeinit=rep(control$shapeinit, len=NOS))
+ kinit=rep(control$Kinit, len = NOS),
+ shapeinit=rep(control$shapeinit, len = NOS))
bnumat = if (nice31) matrix(0,nstar,pstar) else
cbind(matrix(0,nstar,p2star), X_vlm_1save)
rmfromVGAMenv(c("etamat", "z", "U", "beta", "deviance", "fv",
- "cmatrix", "ocmatrix"), prefix=".VGAM.UQO.")
+ "cmatrix", "ocmatrix"), prefix = ".VGAM.UQO.")
for(iter in 1:optim.maxit) {
@@ -550,20 +561,21 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
conjgrad <- optim(par=sitescores, fn=calluqof,
gr = if (control$GradientFunction) callduqof else NULL,
method = if (n*Rank>control$nRmax) "CG" else "BFGS",
- control=list(fnscale=1, trace=as.integer(control$trace),
+ control=list(fnscale = 1, trace=as.integer(control$trace),
maxit=control$Maxit.optim),
etamat=eta, ymat=y, wvec=w, modelno=modelno,
Control=rrcontrol,
nice31=nice31, xmat = x,
n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
- bnumat=bnumat, Hstep=control$Hstep, alldump=FALSE)
+ bnumat=bnumat, Hstep=control$Hstep, alldump = FALSE)
sitescores = getfromVGAMenv("numat", prefix = ".VGAM.UQO.")
dim(sitescores) = c(n, Rank)
sitescores = scale(sitescores, center = TRUE, scale = FALSE)
sitescores = crow1C(sitescores, rrcontrol$Crow1positive)
- dimnames(sitescores) = list(dimnames(y)[[1]], if (Rank==1) "lv" else
- paste("lv", 1:Rank, sep=""))
+ dimnames(sitescores) = list(dimnames(y)[[1]],
+ if (Rank == 1) "lv" else
+ paste("lv", 1:Rank, sep = ""))
if (converged <- (conjgrad$convergence == 0)) break
}
@@ -577,7 +589,7 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
nice31=nice31, xmat = x,
Control=rrcontrol,
n=n, M=M, maxMr5=maxMr5, othint=othint, othdbl=othdbl,
- bnumat=bnumat, Hstep=NA, alldump=TRUE)
+ bnumat=bnumat, Hstep=NA, alldump = TRUE)
coefs = getfromVGAMenv("beta", prefix = ".VGAM.UQO.")
VGAM.fv = getfromVGAMenv("fv", prefix = ".VGAM.UQO.")
@@ -589,29 +601,29 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
if (!intercept.only)
- stop("can only handle intercept.only==TRUE currently")
+ stop("can only handle intercept.only == TRUE currently")
if (nice31) {
coefs = c(t(matrix(coefs, ncol=M))) # Get into right order
coefs = matrix(coefs, nrow=M)
- Amat = coefs[,1:Rank,drop=FALSE]
+ Amat = coefs[,1:Rank,drop = FALSE]
if (rrcontrol$IToleran) {
- B1 = coefs[,-(1:Rank),drop=FALSE]
+ B1 = coefs[,-(1:Rank),drop = FALSE]
Dmat = matrix(0, M, Rank*(Rank+1)/2)
Dmat[,1:Rank] = -0.5
} else {
- Dmat = coefs[,(Rank+1):(Rank + Rank*(Rank+1)/2),drop=FALSE]
- B1 = coefs[,(1+(Rank + Rank*(Rank+1)/2)):ncol(coefs),drop=FALSE]
+ Dmat = coefs[,(Rank+1):(Rank + Rank*(Rank+1)/2),drop = FALSE]
+ B1 = coefs[,(1+(Rank + Rank*(Rank+1)/2)):ncol(coefs),drop = FALSE]
}
} else {
Amat = t(matrix(coefs[1:(Rank*M)], Rank, M))
cptr1 = (Rank*M)
Dmat = coefs[(cptr1+1):(cptr1+Rank*(Rank+1)/2)]
- Dmat = matrix(Dmat, M, Rank*(Rank+1)/2, byrow=TRUE)
+ Dmat = matrix(Dmat, M, Rank*(Rank+1)/2, byrow = TRUE)
cptr1 = (Rank*M) + Rank*(Rank+1)/2
B1 = coefs[(cptr1+1):length(coefs)]
}
- lv.names = if (Rank==1) "lv" else paste("lv", 1:Rank, sep="")
+ lv.names = if (Rank == 1) "lv" else paste("lv", 1:Rank, sep = "")
lp.names = predictors.names
if (!length(lp.names)) lp.names = NULL
extra$Amat = matrix(Amat, M, Rank, dimnames = list(lp.names, lv.names))
@@ -621,12 +633,12 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
extra$Cmat = NULL # This is UQO!!
VGAM.etamat = getfromVGAMenv("etamat", prefix = ".VGAM.UQO.")
- VGAM.etamat = matrix(VGAM.etamat, n, M, byrow=TRUE,
+ VGAM.etamat = matrix(VGAM.etamat, n, M, byrow = TRUE,
dimnames = list(dimnames(y)[[1]], predictors.names))
coefficients = c(coefs) # Make a vector because of class "numeric"
- rmfromVGAMenv(c("etamat", "beta", "fv"), prefix=".VGAM.UQO.")
+ rmfromVGAMenv(c("etamat", "beta", "fv"), prefix = ".VGAM.UQO.")
if (length(family at fini))
eval(family at fini)
@@ -665,29 +677,35 @@ uqo.fit <- function(x, y, w=rep(1, len=nrow(x)),
-printuqo <- function(x, ...)
+show.uqo <- function(object)
{
- if (!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
+ if (!is.null(cl <- object at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ cat("\n")
+ cat(object at misc$n, "sites and", object at misc$M, "responses/species\n")
+ cat("Rank", object at control$Rank)
+ cat(",", ifelse(object at control$EqualToler, "equal-tolerances",
+ "unequal-tolerances"), "\n")
+
+ if (length(deviance(object)))
+ cat("\nResidual deviance:", format(deviance(object)), "\n")
+
+ invisible(object)
+ NULL
+}
- cat("\n")
- cat(x at misc$n, "sites and", x at misc$M, "responses/species\n")
- cat("Rank", x at control$Rank)
- cat(",", ifelse(x at control$EqualToler, "equal-tolerances",
- "unequal-tolerances"), "\n")
- if (length(deviance(x)))
- cat("\nResidual Deviance:", format(deviance(x)), "\n")
- invisible(x)
-}
-setMethod("print", "uqo", function(x, ...) printuqo(x, ...))
- setMethod("show", "uqo", function(object) printuqo(object))
+ setMethod("show", "uqo", function(object) show.uqo(object))
+
+
+
@@ -708,10 +726,12 @@ setMethod("Coef", "uqo", function(object, ...)
-setMethod("show", "Coef.uqo", function(object)
- printCoef.qrrvglm(object, C = FALSE))
-setMethod("print", "Coef.uqo", function(x, ...)
- printCoef.qrrvglm(x, ...))
+
+
+
+setMethod("show", "Coef.uqo",
+ function(object)
+ show.Coef.qrrvglm(object, C = FALSE))
@@ -722,7 +742,8 @@ residualsuqo <- function(object,
if (mode(type) != "character" && mode(type) != "name")
type = as.character(substitute(type))
- type = match.arg(type, c("deviance", "pearson", "working", "response"))[1]
+ type = match.arg(type,
+ c("deviance", "pearson", "working", "response"))[1]
switch(type,
response = object at y - fitted(object),
@@ -730,14 +751,17 @@ residualsuqo <- function(object,
)
}
+
setMethod("resid", "uqo", function(object, ...)
residualsuqo(object, ...))
setMethod("residuals", "uqo", function(object, ...)
residualsuqo(object, ...))
+
fitted.values.uqo <- function(object, ...)
object at fitted.values
+
setMethod("fitted", "uqo", function(object, ...)
fitted.values.uqo(object, ...))
setMethod("fitted.values", "uqo", function(object, ...)
@@ -759,7 +783,7 @@ setMethod("persp", "uqo", function(x, ...)
perspqrrvglm(x, ...))
setMethod("trplot", "uqo", function(object, ...)
- trplot.qrrvglm(object, check.ok=FALSE, ...))
+ trplot.qrrvglm(object, check.ok = FALSE, ...))
setMethod("plot", "uqo", function(x, y, ...)
@@ -767,14 +791,14 @@ setMethod("plot", "uqo", function(x, y, ...)
setMethod("lvplot", "uqo", function(object, ...)
- invisible(lvplot.qrrvglm(object, C=FALSE, check.ok=FALSE, ...)))
+ invisible(lvplot.qrrvglm(object, C = FALSE, check.ok = FALSE, ...)))
.VGAM.UQO.CA = function(Y) {
Y = as.matrix(Y) / sum(Y)
- rowsum = c(Y %*% rep(1, len=ncol(Y)))
- colsum = c(t(Y) %*% rep(1, len=nrow(Y)))
+ rowsum = c(Y %*% rep(1, len = ncol(Y)))
+ colsum = c(t(Y) %*% rep(1, len = nrow(Y)))
rc = outer(rowsum, colsum)
Ybar = (Y - rc) / sqrt(rc)
Q = qr(Ybar)
@@ -782,7 +806,7 @@ setMethod("lvplot", "uqo", function(object, ...)
temp = svd(Ybar)
colnames(temp$u) = paste("CA", 1:length(temp$d), sep = "")
rownames(temp$u) = dimnames(Y)[[1]]
- sweep(as.matrix(temp$u[,1:Q$rank, drop=FALSE]),
+ sweep(as.matrix(temp$u[,1:Q$rank, drop = FALSE]),
1, 1/sqrt(rowsum), "*")
} else stop("Null rank")
}
@@ -796,18 +820,21 @@ if (FALSE) {
type = match.arg(type, c("sites", "species"))[1]
switch(type,
- sites = if (any(slotNames(x)=="lv")) x at lv else Coef(x)@lv,
- species = if (any(slotNames(x)=="Optimum")) x at Optimum else Coef(x)@Optimum
+ sites = if (any(slotNames(x) == "lv")) x at lv else Coef(x)@lv,
+ species = if (any(slotNames(x) == "Optimum")) x at Optimum else
+ Coef(x)@Optimum
)
}
setMethod("scores", "uqo", function(x, ...) scores.uqo(x, ...))
}
+
jitteruqo = function(mat) {
mat * ifelse(runif(length(mat)) < 0.5, -1, 1)
}
+
setMethod("Opt", "uqo", function(object, ...) Opt.qrrvglm(object, ...))
setMethod("Max", "uqo", function(object, ...) Max.qrrvglm(object, ...))
setMethod("lv", "uqo", function(object, ...) lv.qrrvglm(object, ...))
@@ -829,12 +856,12 @@ summary.uqo = function(object, ...) {
answer
}
-printsummary.uqo = function(x, ...) {
+show.summary.uqo = function(x, ...) {
cat("\nCall:\n")
dput(x at call)
- printCoef.qrrvglm(x, ...)
+ show.Coef.qrrvglm(x, ...)
cat("\nNumber of responses/species: ", x at NOS, "\n")
@@ -850,13 +877,14 @@ setClass("summary.uqo", representation("Coef.uqo",
setMethod("summary", "uqo", function(object, ...)
summary.uqo(object, ...))
-setMethod("print", "summary.uqo",
- function(x, ...)
- invisible(printsummary.uqo(x, ...)))
+
+
+
setMethod("show", "summary.uqo",
function(object)
- invisible(printsummary.uqo(object)))
+ show.summary.uqo(object))
+
diff --git a/R/vgam.R b/R/vgam.R
index b9da9f3..f1bc33f 100644
--- a/R/vgam.R
+++ b/R/vgam.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -136,14 +136,14 @@ vgam <- function(formula,
- fit <- vgam.fit(x=x, y=y, w=w, m=mf,
- etastart=etastart, mustart=mustart, coefstart=coefstart,
- offset=offset, family=family, control=control,
- criterion=control$criterion,
- constraints=constraints, extra=extra, qr.arg=qr.arg,
- Terms=mtsave,
- nonparametric=nonparametric, smooth.labels=smooth.labels,
- function.name=function.name, ...)
+ fit <- vgam.fit(x = x, y = y, w = w, mf = mf,
+ etastart = etastart, mustart = mustart, coefstart = coefstart,
+ offset = offset, family = family, control = control,
+ criterion = control$criterion,
+ constraints = constraints, extra = extra, qr.arg = qr.arg,
+ Terms = mtsave,
+ nonparametric = nonparametric, smooth.labels = smooth.labels,
+ function.name = function.name, ...)
if (is.Numeric(fit$nl.df) && any(fit$nl.df < 0)) {
diff --git a/R/vgam.control.q b/R/vgam.control.q
index 41ed049..1ca3ff4 100644
--- a/R/vgam.control.q
+++ b/R/vgam.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -29,27 +29,29 @@ vgam.control <- function(all.knots = FALSE,
criterion <- names(.min.criterion.VGAM)[criterion]
if (!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for argument 'checkwz'")
- if (!is.Numeric(wzepsilon, allow = 1, positive = TRUE))
- stop("bad input for argument 'wzepsilon'")
+ stop("bad input for argument 'checkwz'")
+ if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'wzepsilon'")
if (length(all.knots) > 1)
- warning("all.knots should be of length 1; using first value only")
- if (!is.Numeric(bf.epsilon, allow = 1, posit = TRUE)) {
- warning("bad input for argument 'bf.epsilon'; using 0.00001 instead")
- bf.epsilon <- 0.00001
+ warning("all.knots should be of length 1; using first value only")
+ if (!is.Numeric(bf.epsilon, allowable.length = 1, positive = TRUE)) {
+ warning("bad input for argument 'bf.epsilon'; using 0.00001 instead")
+ bf.epsilon <- 0.00001
}
- if (!is.Numeric(bf.maxit, allow = 1, posit = TRUE, integ = TRUE)) {
- warning("bad input for argument 'bf.maxit'; using 20 instead")
- bf.maxit <- 20
+ if (!is.Numeric(bf.maxit, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE)) {
+ warning("bad input for argument 'bf.maxit'; using 20 instead")
+ bf.maxit <- 20
}
- if (!is.Numeric(epsilon, allow = 1, posit = TRUE)) {
- warning("bad input for argument 'epsilon'; using 0.0001 instead")
- epsilon <- 0.0001
+ if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE)) {
+ warning("bad input for argument 'epsilon'; using 0.0001 instead")
+ epsilon <- 0.0001
}
- if (!is.Numeric(maxit, allow = 1, posit = TRUE, integ = TRUE)) {
- warning("bad input for argument 'maxit'; using 30 instead")
- maxit <- 30
+ if (!is.Numeric(maxit, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE)) {
+ warning("bad input for argument 'maxit'; using 30 instead")
+ maxit <- 30
}
convergence <- expression({
@@ -98,7 +100,8 @@ vgam.nlchisq <- function(qr, resid, wz, smomat, deriv, U, smooth.labels,
for(jay in index) {
yy <- t(cmat[,jay-ptr,drop = FALSE])
yy <- kronecker(smomat[,jay,drop = FALSE], yy) # n x M
- Us <- mux22(U, yy, M = M, upper = TRUE, as.matrix = TRUE) # n * M
+ Us <- mux22(U, yy, M = M, upper = TRUE,
+ as.matrix = TRUE) # n * M
Uss <- matrix(c(t(Us)), nrow=n*M, ncol = 1)
diff --git a/R/vgam.fit.q b/R/vgam.fit.q
index 1a55829..714398a 100644
--- a/R/vgam.fit.q
+++ b/R/vgam.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -68,21 +68,22 @@ vgam.fit <- function(x, y, w, mf,
flush.console()
if (!is.finite(one.more) ||
- !is.logical(one.more)) one.more = FALSE
+ !is.logical(one.more)) one.more = FALSE
if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(family at deriv)
- wz <- eval(family at weight)
- if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
-
- U <- vchol(wz, M=M, n=n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
- z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
-
- c.list$z <- z
- c.list$wz <- wz
- c.list$U <- U
+ iter <- iter + 1
+ deriv.mu <- eval(family at deriv)
+ wz <- eval(family at weight)
+ if (control$checkwz)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+ U <- vchol(wz, M=M, n=n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
+ z <- eta + vbacksub(U, tvfor, M=M, n=n) - offset
+
+ c.list$z <- z
+ c.list$wz <- wz
+ c.list$U <- U
}
c.list$one.more <- one.more
@@ -192,7 +193,8 @@ vgam.fit <- function(x, y, w, mf,
deriv.mu <- eval(family at deriv)
wz <- eval(family at weight)
if (control$checkwz)
- wz = checkwz(wz, M=M, trace=trace, wzeps=control$wzepsilon)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
U <- vchol(wz, M=M, n=n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M=M, n=n)
diff --git a/R/vgam.match.q b/R/vgam.match.q
index 390c694..87433cf 100644
--- a/R/vgam.match.q
+++ b/R/vgam.match.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vglm.R b/R/vglm.R
index cea7790..89368d4 100644
--- a/R/vglm.R
+++ b/R/vglm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/R/vglm.control.q b/R/vglm.control.q
index 87b9832..3bc3e42 100644
--- a/R/vglm.control.q
+++ b/R/vglm.control.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -23,7 +23,7 @@ vlm.control <- function(save.weight = TRUE, tol = 1e-7, method="qr",
}
if (!is.logical(checkwz) || length(checkwz) != 1)
stop("bad input for argument 'checkwz'")
- if (!is.Numeric(wzepsilon, allow = 1, positive = TRUE))
+ if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'wzepsilon'")
list(save.weight=save.weight, tol=tol, method=method,
@@ -56,7 +56,7 @@ vglm.control <- function(checkwz = TRUE,
if (!is.logical(checkwz) || length(checkwz) != 1)
stop("bad input for argument 'checkwz'")
- if (!is.Numeric(wzepsilon, allow = 1, positive = TRUE))
+ if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'wzepsilon'")
convergence <- expression({
@@ -65,20 +65,22 @@ vglm.control <- function(checkwz = TRUE,
switch(criterion,
coefficients = if (iter == 1) iter < maxit else
(iter < maxit &&
- max(abs(new.crit - old.crit) / (abs(old.crit) + epsilon)) > epsilon),
+ max(abs(new.crit - old.crit) / (abs(old.crit) + epsilon))
+ > epsilon),
abs(old.crit-new.crit) / (abs(old.crit)+epsilon) > epsilon &&
iter < maxit)
})
- if (!is.Numeric(epsilon, allow = 1, posit = TRUE)) {
+ if (!is.Numeric(epsilon, allowable.length = 1, positive = TRUE)) {
warning("bad input for argument 'epsilon'; using 0.00001 instead")
epsilon <- 0.00001
}
- if (!is.Numeric(maxit, allow = 1, posit = TRUE, integ = TRUE)) {
+ if (!is.Numeric(maxit, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE)) {
warning("bad input for argument 'maxit'; using 30 instead")
maxit <- 30
}
- if (!is.Numeric(stepsize, allow = 1, posit = TRUE)) {
+ if (!is.Numeric(stepsize, allowable.length = 1, positive = TRUE)) {
warning("bad input for argument 'stepsize'; using 1 instead")
stepsize <- 1
}
diff --git a/R/vglm.fit.q b/R/vglm.fit.q
index 3f392e3..f3f9963 100644
--- a/R/vglm.fit.q
+++ b/R/vglm.fit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -148,19 +148,20 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
if (!is.logical(one.more)) one.more = FALSE
if (one.more) {
- iter <- iter + 1
- deriv.mu <- eval(slot(family, "deriv"))
- wz <- eval(slot(family, "weight"))
- if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace, wzeps = control$wzepsilon)
-
- U <- vchol(wz, M = M, n = n, silent=!trace)
- tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
- z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
-
- c.list$z <- z
- c.list$U <- U
- if (copy_X_vlm) c.list$X_vlm <- X_vlm_save
+ iter <- iter + 1
+ deriv.mu <- eval(slot(family, "deriv"))
+ wz <- eval(slot(family, "weight"))
+ if (control$checkwz)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
+
+ U <- vchol(wz, M = M, n = n, silent=!trace)
+ tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
+ z <- eta + vbacksub(U, tvfor, M = M, n = n) - offset
+
+ c.list$z <- z
+ c.list$U <- U
+ if (copy_X_vlm) c.list$X_vlm <- X_vlm_save
}
c.list$one.more <- one.more
@@ -247,7 +248,8 @@ vglm.fit <- function(x, y, w=rep(1, length(x[, 1])),
deriv.mu <- eval(slot(family, "deriv"))
wz <- eval(slot(family, "weight"))
if (control$checkwz)
- wz = checkwz(wz, M = M, trace = trace, wzeps = control$wzepsilon)
+ wz = checkwz(wz, M = M, trace = trace,
+ wzepsilon = control$wzepsilon)
U <- vchol(wz, M = M, n = n, silent=!trace)
tvfor <- vforsub(U, as.matrix(deriv.mu), M = M, n = n)
diff --git a/R/vlm.R b/R/vlm.R
index c9fbd6d..f05b4bf 100644
--- a/R/vlm.R
+++ b/R/vlm.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -93,9 +93,10 @@ vlm <- function(formula,
Blist <- process.constraints(constraints, x, M)
intercept.only <- ncol(x) == 1 && dimnames(x)[[2]] == "(Intercept)"
- fit = vlm.wfit(xmat=x, z=y, Blist=Blist, wz=wz, U=NULL,
- matrix.out=FALSE, is.vlmX=FALSE, rss=TRUE, qr=qr.arg,
- x.ret=TRUE, offset = offset)
+ fit = vlm.wfit(xmat=x, zmat = y, Blist = Blist, wz = wz, U = NULL,
+ matrix.out = FALSE, is.vlmX = FALSE,
+ rss = TRUE, qr = qr.arg,
+ x.ret = TRUE, offset = offset)
ncol_X_vlm <- fit$rank
fit$R <- fit$qr$qr[1:ncol_X_vlm, 1:ncol_X_vlm, drop=FALSE]
diff --git a/R/vlm.wfit.q b/R/vlm.wfit.q
index 5625fca..5dc23fa 100644
--- a/R/vlm.wfit.q
+++ b/R/vlm.wfit.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -7,17 +7,21 @@
-vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
- matrix.out=FALSE, is.vlmX=FALSE, rss=TRUE, qr=FALSE, x.ret=FALSE,
- offset=NULL,
- omit.these=NULL, only.rss=FALSE,
+
+
+
+vlm.wfit <- function(xmat, zmat, Blist, wz = NULL, U = NULL,
+ matrix.out = FALSE, is.vlmX = FALSE, rss = TRUE, qr = FALSE,
+ x.ret = FALSE,
+ offset = NULL,
+ omit.these = NULL, only.rss = FALSE,
ncolx = if (matrix.out && is.vlmX) {
stop("need argument 'ncolx'")
} else {
ncol(xmat)
},
- xij=NULL,
- lp.names=NULL, Eta.range=NULL, Xm2=NULL, ...) {
+ xij = NULL,
+ lp.names = NULL, Eta.range = NULL, Xm2 = NULL, ...) {
missing.Blist <- missing(Blist)
zmat = as.matrix(zmat)
n <- nrow(zmat)
@@ -31,7 +35,7 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
zmat <- zmat - offset
}
if (missing(U) || !length(U)) {
- U <- vchol(wz, M=M, n=n, silent=FALSE)
+ U <- vchol(wz, M = M, n = n, silent = FALSE)
}
dU <- dim(U)
if (dU[2] != n) {
@@ -45,17 +49,17 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
Blist = replace.constraints(vector("list", ncol(xmat)),
diag(M), 1:ncol(xmat)) # NULL
}
- lm2vlm.model.matrix(x=xmat, Blist=Blist, M=M,
- assign.attributes=FALSE,
+ lm2vlm.model.matrix(x=xmat, Blist=Blist, M = M,
+ assign.attributes = FALSE,
xij = xij,
Xm2=Xm2)
}
- X_vlm <- mux111(U, X_vlm_save, M=M)
- z_vlm <- mux22(U, zmat, M=M, upper=TRUE, as.mat=FALSE)
+ X_vlm <- mux111(U, X_vlm_save, M = M)
+ z_vlm <- mux22(U, zmat, M = M, upper = TRUE, as.matrix = FALSE)
if (length(omit.these)) {
- X_vlm = X_vlm[!omit.these,,drop=FALSE]
+ X_vlm = X_vlm[!omit.these,,drop = FALSE]
z_vlm = z_vlm[!omit.these]
}
@@ -63,7 +67,7 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
if (rss) {
ans$rss <- sum(ans$resid^2)
- if (only.rss) return(list(rss=ans$rss))
+ if (only.rss) return(list(rss = ans$rss))
}
if (length(omit.these) && any(omit.these)) {
@@ -73,7 +77,7 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
fv <- ans$fitted.values
dim(fv) <- c(M, n)
- fv <- vbacksub(U, fv, M=M, n=n) # Have to premultiply fv by U
+ fv <- vbacksub(U, fv, M = M, n = n) # Have to premultiply fv by U
if (length(Eta.range)) {
@@ -84,15 +88,15 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
fv = ifelse(fv > Eta.range[2], Eta.range[2], fv)
}
- ans$fitted.values <- if (M==1) c(fv) else fv
+ ans$fitted.values <- if (M == 1) c(fv) else fv
if (M > 1) {
dimnames(ans$fitted.values) <- list(dimnames(zmat)[[1]], znames)
}
- ans$residuals <- if (M==1) c(zmat-fv) else zmat-fv
+ ans$residuals <- if (M == 1) c(zmat-fv) else zmat-fv
if (M > 1) {
dimnames(ans$residuals) <- list(dimnames(ans$residuals)[[1]], znames)
}
- ans$misc <- list(M=M, n=n)
+ ans$misc <- list(M = M, n = n)
ans$call <- match.call()
ans$constraints <- Blist
@@ -114,7 +118,8 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
dx2 = if (is.vlmX) NULL else dimnames(xmat)[[2]]
- B = matrix(as.numeric(NA), nr=M, nc=ncolx, dimnames=list(lp.names, dx2))
+ B = matrix(as.numeric(NA),
+ nrow = M, ncol = ncolx, dimnames = list(lp.names, dx2))
if (is.null(Blist)) {
Blist = replace.constraints(vector("list", ncolx), diag(M), 1:ncolx)
}
@@ -130,33 +135,36 @@ vlm.wfit <- function(xmat, zmat, Blist, wz=NULL, U=NULL,
}
-print.vlm.wfit <- function(x, ...) {
- if (!is.null(cl <- x$call)) {
- cat("Call:\n")
- dput(cl)
- }
- coef <- x$coefficients
- cat("\nCoefficients:\n")
- print(coef, ...)
- rank <- x$rank
- if (is.null(rank)) {
- rank <- sum(!is.na(coef))
- }
- n <- x$misc$n
- M <- x$misc$M
- rdf <- x$df.resid
- if (is.null(rdf)) {
- rdf <- (n - rank) * M
- }
- cat("\nDegrees of Freedom:", n*M, "Total;", rdf, "Residual\n")
-
- if (!is.null(x$rss)) {
- cat("Residual Sum of Squares:", format(x$rss), "\n")
- }
-
- invisible(x)
+if (FALSE)
+print.vlm.wfit <- function(x, ...) {
+ if (!is.null(cl <- x$call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
+
+ coef <- x$coefficients
+ cat("\nCoefficients:\n")
+ print(coef, ...)
+
+ rank <- x$rank
+ if (is.null(rank)) {
+ rank <- sum(!is.na(coef))
+ }
+ n <- x$misc$n
+ M <- x$misc$M
+ rdf <- x$df.resid
+ if (is.null(rdf)) {
+ rdf <- (n - rank) * M
+ }
+ cat("\nDegrees of Freedom:", n*M, "Total;", rdf, "Residual\n")
+
+ if (!is.null(x$rss)) {
+ cat("Residual Sum of Squares:", format(x$rss), "\n")
+ }
+
+ invisible(x)
}
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index 9a36714..c6fc0b2 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
@@ -62,12 +62,16 @@ setMethod("resid", signature(object="vsmooth.spline"),
setMethod("predict", signature(object="vsmooth.spline"),
function(object, ...)
predictvsmooth.spline(object, ...))
-setMethod("print", "vsmooth.spline",
- function(x, ...)
- invisible(printvsmooth.spline(x, ...)))
+
+
+
setMethod("show", "vsmooth.spline",
function(object)
- printvsmooth.spline(object))
+ show.vsmooth.spline(object))
+
+
+
+
setMethod("plot", "vsmooth.spline",
function(x, y, ...) {
if (!missing(y)) stop("cannot process the 'y' argument")
@@ -222,9 +226,10 @@ vsmooth.spline <- function(x, y, w = NULL, df = rep(5, M),
lfit = vlm(yinyin ~ 1 + x, # xxx
constraints = constraints,
- save.weight = FALSE, qr = FALSE, x = FALSE, y = FALSE,
+ save.weight = FALSE,
+ qr.arg = FALSE, x.arg = FALSE, y.arg = FALSE,
smart = FALSE,
- weight = matrix(collaps$wzbar, neff, dim2wz))
+ weights = matrix(collaps$wzbar, neff, dim2wz))
}
ncb0 <- ncol(constraints[[2]]) # Of xxx and not of the intercept
@@ -422,27 +427,27 @@ vsmooth.spline <- function(x, y, w = NULL, df = rep(5, M),
}
-printvsmooth.spline <- function(x, ...) {
- if (!is.null(cl <- x at call)) {
- cat("Call:\n")
- dput(cl)
- }
+show.vsmooth.spline <- function(x, ...) {
+ if (!is.null(cl <- x at call)) {
+ cat("Call:\n")
+ dput(cl)
+ }
- ncb <- if (length(x at nlfit)) ncol(x at nlfit@Bcoefficients) else NULL
- cat("\nSmoothing Parameter (Spar):",
- if (length(ncb) && ncb == 1) format(x at spar) else
- paste(format(x at spar), collapse=", "), "\n")
+ ncb <- if (length(x at nlfit)) ncol(x at nlfit@Bcoefficients) else NULL
+ cat("\nSmoothing Parameter (Spar):",
+ if (length(ncb) && ncb == 1) format(x at spar) else
+ paste(format(x at spar), collapse=", "), "\n")
- cat("\nEquivalent Degrees of Freedom (Df):",
- if (length(ncb) && ncb == 1) format(x at df) else
- paste(format(x at df), collapse=", "), "\n")
+ cat("\nEquivalent Degrees of Freedom (Df):",
+ if (length(ncb) && ncb == 1) format(x at df) else
+ paste(format(x at df), collapse=", "), "\n")
- if (!all(trivial.constraints(x at constraints) == 1)) {
- cat("\nConstraint matrices:\n")
- print(x at constraints)
- }
+ if (!all(trivial.constraints(x at constraints) == 1)) {
+ cat("\nConstraint matrices:\n")
+ print(x at constraints)
+ }
- invisible(x)
+ invisible(x)
}
@@ -451,9 +456,9 @@ coefvsmooth.spline.fit = function(object, ...) {
}
-coefvsmooth.spline = function(object, matrix=FALSE, ...) {
+coefvsmooth.spline = function(object, matrix = FALSE, ...) {
- list(lfit=coefvlm(object at lfit, matrix=matrix),
+ list(lfit = coefvlm(object at lfit, matrix.out = matrix),
nlfit=coefvsmooth.spline.fit(object at nlfit))
}
@@ -511,13 +516,15 @@ predictvsmooth.spline <- function(object, x, deriv = 0, se.fit = FALSE) {
}
- mat.coef = coefvlm(lfit, matrix=TRUE)
+ mat.coef = coefvlm(lfit, matrix.out = TRUE)
coeflfit <- t(mat.coef) # M x p now
M <- nrow(coeflfit) # if (is.matrix(object at y)) ncol(object at y) else 1
- pred = if (deriv == 0) predict(lfit, data.frame(x = x)) else
- if (deriv == 1) matrix(coeflfit[,2], length(x), M, byr=TRUE) else
- matrix(0, length(x), M)
+ pred = if (deriv == 0)
+ predict(lfit, data.frame(x = x)) else
+ if (deriv == 1)
+ matrix(coeflfit[,2], length(x), M, byrow = TRUE) else
+ matrix(0, length(x), M)
if (!length(nlfit at knots)) {
return(list(x = x, y = pred))
}
@@ -526,17 +533,17 @@ predictvsmooth.spline <- function(object, x, deriv = 0, se.fit = FALSE) {
conmat = if (!length(lfit at constraints)) diag(M) else
lfit at constraints[[2]]
- conmat = conmat[, nonlin, drop=FALSE] # Of nonlinear functions
+ conmat = conmat[, nonlin, drop = FALSE] # Of nonlinear functions
list(x = x, y=pred + predict(nlfit, x, deriv)$y %*% t(conmat))
}
-predictvsmooth.spline.fit <- function(object, x, deriv=0) {
+predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
nknots = nrow(object at Bcoefficients)
drangex <- object at xmax - object at xmin
if (missing(x))
- x <- seq(from=object at xmin, to=object at xmax, length=nknots-4)
+ x <- seq(from = object at xmin, to = object at xmax, length.out = nknots-4)
xs <- as.double((x - object at xmin) / drangex)
diff --git a/R/zzz.R b/R/zzz.R
index 61570a9..b26c345 100644
--- a/R/zzz.R
+++ b/R/zzz.R
@@ -1,5 +1,5 @@
# These functions are
-# Copyright (C) 1998-2011 T.W. Yee, University of Auckland.
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
# All rights reserved.
diff --git a/data/alclevels.rda b/data/alclevels.rda
index a0ed2e3..a9375c8 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index 89847ff..7782950 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index a0d31bc..82e7292 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/azprocedure.rda b/data/azprocedure.rda
deleted file mode 100644
index c1f21b2..0000000
Binary files a/data/azprocedure.rda and /dev/null differ
diff --git a/data/backPain.rda b/data/backPain.rda
index 0d11079..b8a97f8 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/backPain.txt.gz b/data/backPain.txt.gz
index 1ce44fe..a14ebc5 100644
Binary files a/data/backPain.txt.gz and b/data/backPain.txt.gz differ
diff --git a/data/bmi.nz.txt.xz b/data/bmi.nz.txt.xz
new file mode 100644
index 0000000..7ff9878
Binary files /dev/null and b/data/bmi.nz.txt.xz differ
diff --git a/data/bminz.txt.gz b/data/bminz.txt.gz
deleted file mode 100644
index 8abe188..0000000
Binary files a/data/bminz.txt.gz and /dev/null differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 980046c..48e80c1 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/chest.nz.txt.bz2 b/data/chest.nz.txt.bz2
new file mode 100644
index 0000000..f500457
Binary files /dev/null and b/data/chest.nz.txt.bz2 differ
diff --git a/data/chestnz.txt.gz b/data/chestnz.txt.gz
deleted file mode 100644
index a460183..0000000
Binary files a/data/chestnz.txt.gz and /dev/null differ
diff --git a/data/nzc.txt.gz b/data/chinese.nz.txt.gz
similarity index 100%
rename from data/nzc.txt.gz
rename to data/chinese.nz.txt.gz
diff --git a/data/crashbc.rda b/data/crashbc.rda
index d50332a..b7cd392 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 538d3d7..a1f3747 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 977f7e5..f56adae 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index 5fca503..edf8f59 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 81ca3b5..85c23ca 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 87d2093..22b613c 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/crime.us.rda b/data/crime.us.rda
new file mode 100644
index 0000000..94e70cd
Binary files /dev/null and b/data/crime.us.rda differ
diff --git a/data/datalist b/data/datalist
new file mode 100644
index 0000000..a8b4d3f
--- /dev/null
+++ b/data/datalist
@@ -0,0 +1,47 @@
+alclevels
+alcoff
+auuc
+backPain
+bmi.nz
+car.all
+chest.nz
+chinese.nz
+coalminers
+crashbc
+crashf
+crashi
+crashmc
+crashp
+crashtr
+crime.us
+enzyme
+fibre15
+fibre1dot5: fibre1.5
+finney44
+gala
+gew
+grain.us
+hspider
+hued
+huie
+hunua
+huse
+leukemia
+lirat
+marital.nz
+mmt
+olympic
+oxtemp
+pneumo
+rainfall
+ruge
+toxop
+ugss
+venice
+venice90
+waitakere
+wffc
+wffc.indiv
+wffc.nc
+wffc.teams
+xs.nz
diff --git a/data/fibre15.rda b/data/fibre15.rda
index 161323f..895e424 100644
Binary files a/data/fibre15.rda and b/data/fibre15.rda differ
diff --git a/data/fibre1dot5.rda b/data/fibre1dot5.rda
index 5f507d0..caec8d9 100644
Binary files a/data/fibre1dot5.rda and b/data/fibre1dot5.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 1828aec..26f11db 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/gala.rda b/data/gala.rda
index 3580f83..f1d9c73 100644
Binary files a/data/gala.rda and b/data/gala.rda differ
diff --git a/data/gew.txt.gz b/data/gew.txt.gz
index 0653af6..c8c35a6 100644
Binary files a/data/gew.txt.gz and b/data/gew.txt.gz differ
diff --git a/data/grain.us.txt.bz2 b/data/grain.us.txt.bz2
new file mode 100644
index 0000000..07411db
Binary files /dev/null and b/data/grain.us.txt.bz2 differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 7347100..f4881ef 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/hued.rda b/data/hued.rda
index 29f3c5e..82658bf 100644
Binary files a/data/hued.rda and b/data/hued.rda differ
diff --git a/data/huie.rda b/data/huie.rda
index 9d98fe0..4a250b9 100644
Binary files a/data/huie.rda and b/data/huie.rda differ
diff --git a/data/hunua.txt.bz2 b/data/hunua.txt.bz2
new file mode 100644
index 0000000..a7ebf70
Binary files /dev/null and b/data/hunua.txt.bz2 differ
diff --git a/data/hunua.txt.gz b/data/hunua.txt.gz
deleted file mode 100644
index 9ecb4b2..0000000
Binary files a/data/hunua.txt.gz and /dev/null differ
diff --git a/data/huse.rda b/data/huse.rda
index d2db92f..11e8c39 100644
Binary files a/data/huse.rda and b/data/huse.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index 51bfd1e..314b7a8 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
new file mode 100644
index 0000000..7c51054
Binary files /dev/null and b/data/marital.nz.rda differ
diff --git a/data/mmt.rda b/data/mmt.rda
index ee5a72f..1902b9f 100644
Binary files a/data/mmt.rda and b/data/mmt.rda differ
diff --git a/data/nzmarital.rda b/data/nzmarital.rda
deleted file mode 100644
index 11718ce..0000000
Binary files a/data/nzmarital.rda and /dev/null differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index d450d98..789a802 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/rainfall.rda b/data/rainfall.rda
index b99a6e7..190fe2b 100644
Binary files a/data/rainfall.rda and b/data/rainfall.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index db5a77e..a99d004 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index 6642d43..d0c02f3 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/ugss.rda b/data/ugss.rda
index a162e4c..c7dfc32 100644
Binary files a/data/ugss.rda and b/data/ugss.rda differ
diff --git a/data/uscrime.rda b/data/uscrime.rda
deleted file mode 100644
index 6aaceb2..0000000
Binary files a/data/uscrime.rda and /dev/null differ
diff --git a/data/usgrain.txt.gz b/data/usgrain.txt.gz
deleted file mode 100644
index a6b643c..0000000
Binary files a/data/usgrain.txt.gz and /dev/null differ
diff --git a/data/venice.rda b/data/venice.rda
index 9fce965..e074438 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 7d87be2..5303d30 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/waitakere.txt.bz2 b/data/waitakere.txt.bz2
new file mode 100644
index 0000000..418956d
Binary files /dev/null and b/data/waitakere.txt.bz2 differ
diff --git a/data/waitakere.txt.gz b/data/waitakere.txt.gz
deleted file mode 100644
index 1971b3f..0000000
Binary files a/data/waitakere.txt.gz and /dev/null differ
diff --git a/data/wffc.indiv.rda b/data/wffc.indiv.rda
index 6c195a6..6f2369e 100644
Binary files a/data/wffc.indiv.rda and b/data/wffc.indiv.rda differ
diff --git a/data/wffc.nc.rda b/data/wffc.nc.rda
index 8d17a51..0d97c86 100644
Binary files a/data/wffc.nc.rda and b/data/wffc.nc.rda differ
diff --git a/data/wffc.rda b/data/wffc.rda
index 2edf8ae..016db77 100644
Binary files a/data/wffc.rda and b/data/wffc.rda differ
diff --git a/data/wffc.teams.rda b/data/wffc.teams.rda
index 5f8e595..b13eb0f 100644
Binary files a/data/wffc.teams.rda and b/data/wffc.teams.rda differ
diff --git a/data/xs.nz.rda b/data/xs.nz.rda
new file mode 100644
index 0000000..2f36daa
Binary files /dev/null and b/data/xs.nz.rda differ
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
index 9d63d50..8009523 100644
--- a/inst/doc/categoricalVGAM.Rnw
+++ b/inst/doc/categoricalVGAM.Rnw
@@ -1519,16 +1519,16 @@ We will write these levels as $Y=1$, $2$, $3$, $4$, respectively,
and will choose the married/partnered (second level) as the reference group
because the other levels emanate directly from it.
-Suppose the data is in a data frame called \texttt{nzmarital}
+Suppose the data is in a data frame called \texttt{marital.nz}
and looks like
<<>>=
-head(nzmarital, 4)
-summary(nzmarital)
+head(marital.nz, 4)
+summary(marital.nz)
@
We fit the VGAM
<<>>=
fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel=2),
- data = nzmarital)
+ data = marital.nz)
@
Once again let's firstly check the input.
@@ -1680,7 +1680,7 @@ fit2.ms <-
vglm(mstatus ~ poly(age, 2) + foo(age) + age,
family = multinomial(refLevel=2),
constraints=clist,
- data=nzmarital)
+ data=marital.nz)
@
Then
<<>>=
@@ -1759,8 +1759,8 @@ Thus \texttt{fit2.ms} appears quite reasonable.
The estimated probabilities of the original fit can be plotted
against~\texttt{age} using
<<fig=F>>=
-ooo <- with(nzmarital, order(age))
-with(nzmarital, matplot(age[ooo], fitted(fit.ms)[ooo,],
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
type="l", las=1, lwd=2, ylim=0:1,
ylab="Fitted probabilities",
xlab="Age", # main="Marital status amongst NZ Male Europeans",
@@ -1787,8 +1787,8 @@ since males die younger than females on average.
<<fig=TRUE,width=8,height=4.8,echo=FALSE>>=
par(mfrow=c(1,1))
par(mar=c(4.5,4.0,0.2,0.2)+0.1)
-ooo <- with(nzmarital, order(age))
-with(nzmarital, matplot(age[ooo], fitted(fit.ms)[ooo,],
+ooo <- with(marital.nz, order(age))
+with(marital.nz, matplot(age[ooo], fitted(fit.ms)[ooo,],
type="l", las=1, lwd=2, ylim=0:1,
ylab="Fitted probabilities",
xlab="Age",
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index b796de0..37470d7 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/inst/doc/jss.bst b/inst/doc/jss.bst
deleted file mode 100644
index a5b0e78..0000000
--- a/inst/doc/jss.bst
+++ /dev/null
@@ -1,1647 +0,0 @@
-%%
-%% This is file `jss.bst',
-%% generated with the docstrip utility.
-%%
-%% The original source files were:
-%%
-%% merlin.mbs (with options: `ay,nat,nm-rvx,keyxyr,dt-beg,yr-par,note-yr,tit-qq,bt-qq,atit-u,trnum-it,vol-bf,volp-com,num-xser,isbn,issn,edpar,pp,ed,xedn,xand,etal-it,revdata,eprint,url,url-blk,doi,nfss')
-%% ----------------------------------------
-%% *** Journal of Statistical Software ***
-%%
-%% Copyright 1994-2004 Patrick W Daly
- % ===============================================================
- % IMPORTANT NOTICE:
- % This bibliographic style (bst) file has been generated from one or
- % more master bibliographic style (mbs) files, listed above.
- %
- % This generated file can be redistributed and/or modified under the terms
- % of the LaTeX Project Public License Distributed from CTAN
- % archives in directory macros/latex/base/lppl.txt; either
- % version 1 of the License, or any later version.
- % ===============================================================
- % Name and version information of the main mbs file:
- % \ProvidesFile{merlin.mbs}[2004/02/09 4.13 (PWD, AO, DPC)]
- % For use with BibTeX version 0.99a or later
- %-------------------------------------------------------------------
- % This bibliography style file is intended for texts in ENGLISH
- % This is an author-year citation style bibliography. As such, it is
- % non-standard LaTeX, and requires a special package file to function properly.
- % Such a package is natbib.sty by Patrick W. Daly
- % The form of the \bibitem entries is
- % \bibitem[Jones et al.(1990)]{key}...
- % \bibitem[Jones et al.(1990)Jones, Baker, and Smith]{key}...
- % The essential feature is that the label (the part in brackets) consists
- % of the author names, as they should appear in the citation, with the year
- % in parentheses following. There must be no space before the opening
- % parenthesis!
- % With natbib v5.3, a full list of authors may also follow the year.
- % In natbib.sty, it is possible to define the type of enclosures that is
- % really wanted (brackets or parentheses), but in either case, there must
- % be parentheses in the label.
- % The \cite command functions as follows:
- % \citet{key} ==>> Jones et al. (1990)
- % \citet*{key} ==>> Jones, Baker, and Smith (1990)
- % \citep{key} ==>> (Jones et al., 1990)
- % \citep*{key} ==>> (Jones, Baker, and Smith, 1990)
- % \citep[chap. 2]{key} ==>> (Jones et al., 1990, chap. 2)
- % \citep[e.g.][]{key} ==>> (e.g. Jones et al., 1990)
- % \citep[e.g.][p. 32]{key} ==>> (e.g. Jones et al., p. 32)
- % \citeauthor{key} ==>> Jones et al.
- % \citeauthor*{key} ==>> Jones, Baker, and Smith
- % \citeyear{key} ==>> 1990
- %---------------------------------------------------------------------
-
-ENTRY
- { address
- archive
- author
- booktitle
- chapter
- collaboration
- doi
- edition
- editor
- eid
- eprint
- howpublished
- institution
- isbn
- issn
- journal
- key
- month
- note
- number
- numpages
- organization
- pages
- publisher
- school
- series
- title
- type
- url
- volume
- year
- }
- {}
- { label extra.label sort.label short.list }
-INTEGERS { output.state before.all mid.sentence after.sentence after.block }
-FUNCTION {init.state.consts}
-{ #0 'before.all :=
- #1 'mid.sentence :=
- #2 'after.sentence :=
- #3 'after.block :=
-}
-STRINGS { s t}
-FUNCTION {output.nonnull}
-{ 's :=
- output.state mid.sentence =
- { ", " * write$ }
- { output.state after.block =
- { add.period$ write$
- newline$
- "\newblock " write$
- }
- { output.state before.all =
- 'write$
- { add.period$ " " * write$ }
- if$
- }
- if$
- mid.sentence 'output.state :=
- }
- if$
- s
-}
-FUNCTION {output}
-{ duplicate$ empty$
- 'pop$
- 'output.nonnull
- if$
-}
-FUNCTION {output.check}
-{ 't :=
- duplicate$ empty$
- { pop$ "empty " t * " in " * cite$ * warning$ }
- 'output.nonnull
- if$
-}
-FUNCTION {fin.entry}
-{ add.period$
- write$
- newline$
-}
-
-FUNCTION {new.block}
-{ output.state before.all =
- 'skip$
- { after.block 'output.state := }
- if$
-}
-FUNCTION {new.sentence}
-{ output.state after.block =
- 'skip$
- { output.state before.all =
- 'skip$
- { after.sentence 'output.state := }
- if$
- }
- if$
-}
-FUNCTION {add.blank}
-{ " " * before.all 'output.state :=
-}
-
-FUNCTION {date.block}
-{
- new.block
-}
-
-FUNCTION {not}
-{ { #0 }
- { #1 }
- if$
-}
-FUNCTION {and}
-{ 'skip$
- { pop$ #0 }
- if$
-}
-FUNCTION {or}
-{ { pop$ #1 }
- 'skip$
- if$
-}
-FUNCTION {non.stop}
-{ duplicate$
- "}" * add.period$
- #-1 #1 substring$ "." =
-}
-
-STRINGS {z}
-FUNCTION {remove.dots}
-{ 'z :=
- ""
- { z empty$ not }
- { z #1 #1 substring$
- z #2 global.max$ substring$ 'z :=
- duplicate$ "." = 'pop$
- { * }
- if$
- }
- while$
-}
-FUNCTION {new.block.checkb}
-{ empty$
- swap$ empty$
- and
- 'skip$
- 'new.block
- if$
-}
-FUNCTION {field.or.null}
-{ duplicate$ empty$
- { pop$ "" }
- 'skip$
- if$
-}
-FUNCTION {emphasize}
-{ duplicate$ empty$
- { pop$ "" }
- { "\emph{" swap$ * "}" * }
- if$
-}
-FUNCTION {bolden}
-{ duplicate$ empty$
- { pop$ "" }
- { "\textbf{" swap$ * "}" * }
- if$
-}
-FUNCTION {tie.or.space.prefix}
-{ duplicate$ text.length$ #3 <
- { "~" }
- { " " }
- if$
- swap$
-}
-
-FUNCTION {capitalize}
-{ "u" change.case$ "t" change.case$ }
-
-FUNCTION {space.word}
-{ " " swap$ * " " * }
- % Here are the language-specific definitions for explicit words.
- % Each function has a name bbl.xxx where xxx is the English word.
- % The language selected here is ENGLISH
-FUNCTION {bbl.and}
-{ "and"}
-
-FUNCTION {bbl.etal}
-{ "et~al." }
-
-FUNCTION {bbl.editors}
-{ "eds." }
-
-FUNCTION {bbl.editor}
-{ "ed." }
-
-FUNCTION {bbl.edby}
-{ "edited by" }
-
-FUNCTION {bbl.edition}
-{ "edition" }
-
-FUNCTION {bbl.volume}
-{ "volume" }
-
-FUNCTION {bbl.of}
-{ "of" }
-
-FUNCTION {bbl.number}
-{ "number" }
-
-FUNCTION {bbl.nr}
-{ "no." }
-
-FUNCTION {bbl.in}
-{ "in" }
-
-FUNCTION {bbl.pages}
-{ "pp." }
-
-FUNCTION {bbl.page}
-{ "p." }
-
-FUNCTION {bbl.eidpp}
-{ "pages" }
-
-FUNCTION {bbl.chapter}
-{ "chapter" }
-
-FUNCTION {bbl.techrep}
-{ "Technical Report" }
-
-FUNCTION {bbl.mthesis}
-{ "Master's thesis" }
-
-FUNCTION {bbl.phdthesis}
-{ "Ph.D. thesis" }
-
-MACRO {jan} {"January"}
-
-MACRO {feb} {"February"}
-
-MACRO {mar} {"March"}
-
-MACRO {apr} {"April"}
-
-MACRO {may} {"May"}
-
-MACRO {jun} {"June"}
-
-MACRO {jul} {"July"}
-
-MACRO {aug} {"August"}
-
-MACRO {sep} {"September"}
-
-MACRO {oct} {"October"}
-
-MACRO {nov} {"November"}
-
-MACRO {dec} {"December"}
-
-MACRO {acmcs} {"ACM Computing Surveys"}
-
-MACRO {acta} {"Acta Informatica"}
-
-MACRO {cacm} {"Communications of the ACM"}
-
-MACRO {ibmjrd} {"IBM Journal of Research and Development"}
-
-MACRO {ibmsj} {"IBM Systems Journal"}
-
-MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
-
-MACRO {ieeetc} {"IEEE Transactions on Computers"}
-
-MACRO {ieeetcad}
- {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
-
-MACRO {ipl} {"Information Processing Letters"}
-
-MACRO {jacm} {"Journal of the ACM"}
-
-MACRO {jcss} {"Journal of Computer and System Sciences"}
-
-MACRO {scp} {"Science of Computer Programming"}
-
-MACRO {sicomp} {"SIAM Journal on Computing"}
-
-MACRO {tocs} {"ACM Transactions on Computer Systems"}
-
-MACRO {tods} {"ACM Transactions on Database Systems"}
-
-MACRO {tog} {"ACM Transactions on Graphics"}
-
-MACRO {toms} {"ACM Transactions on Mathematical Software"}
-
-MACRO {toois} {"ACM Transactions on Office Information Systems"}
-
-MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
-
-MACRO {tcs} {"Theoretical Computer Science"}
-FUNCTION {bibinfo.check}
-{ swap$
- duplicate$ missing$
- {
- pop$ pop$
- ""
- }
- { duplicate$ empty$
- {
- swap$ pop$
- }
- { swap$
- pop$
- }
- if$
- }
- if$
-}
-FUNCTION {bibinfo.warn}
-{ swap$
- duplicate$ missing$
- {
- swap$ "missing " swap$ * " in " * cite$ * warning$ pop$
- ""
- }
- { duplicate$ empty$
- {
- swap$ "empty " swap$ * " in " * cite$ * warning$
- }
- { swap$
- pop$
- }
- if$
- }
- if$
-}
-FUNCTION {format.eprint}
-{ eprint duplicate$ empty$
- 'skip$
- { "\eprint"
- archive empty$
- 'skip$
- { "[" * archive * "]" * }
- if$
- "{" * swap$ * "}" *
- }
- if$
-}
-FUNCTION {format.url}
-{ url empty$
- { "" }
- { "\urlprefix\url{" url * "}" * }
- if$
-}
-
-STRINGS { bibinfo}
-INTEGERS { nameptr namesleft numnames }
-
-FUNCTION {format.names}
-{ 'bibinfo :=
- duplicate$ empty$ 'skip$ {
- 's :=
- "" 't :=
- #1 'nameptr :=
- s num.names$ 'numnames :=
- numnames 'namesleft :=
- { namesleft #0 > }
- { s nameptr
- "{vv~}{ll}{ jj}{ f{}}"
- format.name$
- remove.dots
- bibinfo bibinfo.check
- 't :=
- nameptr #1 >
- {
- namesleft #1 >
- { ", " * t * }
- {
- "," *
- s nameptr "{ll}" format.name$ duplicate$ "others" =
- { 't := }
- { pop$ }
- if$
- t "others" =
- {
- " " * bbl.etal emphasize *
- }
- { " " * t * }
- if$
- }
- if$
- }
- 't
- if$
- nameptr #1 + 'nameptr :=
- namesleft #1 - 'namesleft :=
- }
- while$
- } if$
-}
-FUNCTION {format.names.ed}
-{
- 'bibinfo :=
- duplicate$ empty$ 'skip$ {
- 's :=
- "" 't :=
- #1 'nameptr :=
- s num.names$ 'numnames :=
- numnames 'namesleft :=
- { namesleft #0 > }
- { s nameptr
- "{f{}~}{vv~}{ll}{ jj}"
- format.name$
- remove.dots
- bibinfo bibinfo.check
- 't :=
- nameptr #1 >
- {
- namesleft #1 >
- { ", " * t * }
- {
- "," *
- s nameptr "{ll}" format.name$ duplicate$ "others" =
- { 't := }
- { pop$ }
- if$
- t "others" =
- {
-
- " " * bbl.etal emphasize *
- }
- { " " * t * }
- if$
- }
- if$
- }
- 't
- if$
- nameptr #1 + 'nameptr :=
- namesleft #1 - 'namesleft :=
- }
- while$
- } if$
-}
-FUNCTION {format.key}
-{ empty$
- { key field.or.null }
- { "" }
- if$
-}
-
-FUNCTION {format.authors}
-{ author "author" format.names
- duplicate$ empty$ 'skip$
- { collaboration "collaboration" bibinfo.check
- duplicate$ empty$ 'skip$
- { " (" swap$ * ")" * }
- if$
- *
- }
- if$
-}
-FUNCTION {get.bbl.editor}
-{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ }
-
-FUNCTION {format.editors}
-{ editor "editor" format.names duplicate$ empty$ 'skip$
- {
- " " *
- get.bbl.editor
- "(" swap$ * ")" *
- *
- }
- if$
-}
-FUNCTION {format.isbn}
-{ isbn "isbn" bibinfo.check
- duplicate$ empty$ 'skip$
- {
- new.block
- "ISBN " swap$ *
- }
- if$
-}
-
-FUNCTION {format.issn}
-{ issn "issn" bibinfo.check
- duplicate$ empty$ 'skip$
- {
- new.block
- "ISSN " swap$ *
- }
- if$
-}
-
-FUNCTION {format.doi}
-{ doi "doi" bibinfo.check
- duplicate$ empty$ 'skip$
- {
- new.block
- "\doi{" swap$ * "}" *
- }
- if$
-}
-FUNCTION {format.note}
-{
- note empty$
- { "" }
- { note #1 #1 substring$
- duplicate$ "{" =
- 'skip$
- { output.state mid.sentence =
- { "l" }
- { "u" }
- if$
- change.case$
- }
- if$
- note #2 global.max$ substring$ * "note" bibinfo.check
- }
- if$
-}
-
-FUNCTION {format.title}
-{ title
- "title" bibinfo.check
- duplicate$ empty$ 'skip$
- {
- "\enquote{" swap$ *
- add.period$ "}" *
- }
- if$
-}
-FUNCTION {end.quote.btitle}
-{ booktitle empty$
- 'skip$
- { before.all 'output.state := }
- if$
-}
-FUNCTION {format.full.names}
-{'s :=
- "" 't :=
- #1 'nameptr :=
- s num.names$ 'numnames :=
- numnames 'namesleft :=
- { namesleft #0 > }
- { s nameptr
- "{vv~}{ll}" format.name$
- 't :=
- nameptr #1 >
- {
- namesleft #1 >
- { ", " * t * }
- {
- s nameptr "{ll}" format.name$ duplicate$ "others" =
- { 't := }
- { pop$ }
- if$
- t "others" =
- {
- " " * bbl.etal emphasize *
- }
- {
- numnames #2 >
- { "," * }
- 'skip$
- if$
- bbl.and
- space.word * t *
- }
- if$
- }
- if$
- }
- 't
- if$
- nameptr #1 + 'nameptr :=
- namesleft #1 - 'namesleft :=
- }
- while$
-}
-
-FUNCTION {author.editor.key.full}
-{ author empty$
- { editor empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { editor format.full.names }
- if$
- }
- { author format.full.names }
- if$
-}
-
-FUNCTION {author.key.full}
-{ author empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { author format.full.names }
- if$
-}
-
-FUNCTION {editor.key.full}
-{ editor empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { editor format.full.names }
- if$
-}
-
-FUNCTION {make.full.names}
-{ type$ "book" =
- type$ "inbook" =
- or
- 'author.editor.key.full
- { type$ "proceedings" =
- 'editor.key.full
- 'author.key.full
- if$
- }
- if$
-}
-
-FUNCTION {output.bibitem}
-{ newline$
- "\bibitem[{" write$
- label write$
- ")" make.full.names duplicate$ short.list =
- { pop$ }
- { * }
- if$
- "}]{" * write$
- cite$ write$
- "}" write$
- newline$
- ""
- before.all 'output.state :=
-}
-
-FUNCTION {n.dashify}
-{
- 't :=
- ""
- { t empty$ not }
- { t #1 #1 substring$ "-" =
- { t #1 #2 substring$ "--" = not
- { "--" *
- t #2 global.max$ substring$ 't :=
- }
- { { t #1 #1 substring$ "-" = }
- { "-" *
- t #2 global.max$ substring$ 't :=
- }
- while$
- }
- if$
- }
- { t #1 #1 substring$ *
- t #2 global.max$ substring$ 't :=
- }
- if$
- }
- while$
-}
-
-FUNCTION {word.in}
-{ bbl.in capitalize
- " " * }
-
-FUNCTION {format.date}
-{ year "year" bibinfo.check duplicate$ empty$
- {
- "empty year in " cite$ * "; set to ????" * warning$
- pop$ "????"
- }
- 'skip$
- if$
- extra.label *
- before.all 'output.state :=
- " (" swap$ * ")" *
-}
-FUNCTION {format.btitle}
-{ title "title" bibinfo.check
- duplicate$ empty$ 'skip$
- {
- emphasize
- }
- if$
-}
-FUNCTION {either.or.check}
-{ empty$
- 'pop$
- { "can't use both " swap$ * " fields in " * cite$ * warning$ }
- if$
-}
-FUNCTION {format.bvolume}
-{ volume empty$
- { "" }
- { bbl.volume volume tie.or.space.prefix
- "volume" bibinfo.check * *
- series "series" bibinfo.check
- duplicate$ empty$ 'pop$
- { swap$ bbl.of space.word * swap$
- emphasize * }
- if$
- "volume and number" number either.or.check
- }
- if$
-}
-FUNCTION {format.number.series}
-{ volume empty$
- { number empty$
- { series field.or.null }
- { series empty$
- { number "number" bibinfo.check }
- { output.state mid.sentence =
- { bbl.number }
- { bbl.number capitalize }
- if$
- number tie.or.space.prefix "number" bibinfo.check * *
- bbl.in space.word *
- series "series" bibinfo.check *
- }
- if$
- }
- if$
- }
- { "" }
- if$
-}
-
-FUNCTION {format.edition}
-{ edition duplicate$ empty$ 'skip$
- {
- output.state mid.sentence =
- { "l" }
- { "t" }
- if$ change.case$
- "edition" bibinfo.check
- " " * bbl.edition *
- }
- if$
-}
-INTEGERS { multiresult }
-FUNCTION {multi.page.check}
-{ 't :=
- #0 'multiresult :=
- { multiresult not
- t empty$ not
- and
- }
- { t #1 #1 substring$
- duplicate$ "-" =
- swap$ duplicate$ "," =
- swap$ "+" =
- or or
- { #1 'multiresult := }
- { t #2 global.max$ substring$ 't := }
- if$
- }
- while$
- multiresult
-}
-FUNCTION {format.pages}
-{ pages duplicate$ empty$ 'skip$
- { duplicate$ multi.page.check
- {
- bbl.pages swap$
- n.dashify
- }
- {
- bbl.page swap$
- }
- if$
- tie.or.space.prefix
- "pages" bibinfo.check
- * *
- }
- if$
-}
-FUNCTION {format.journal.pages}
-{ pages duplicate$ empty$ 'pop$
- { swap$ duplicate$ empty$
- { pop$ pop$ format.pages }
- {
- ", " *
- swap$
- n.dashify
- "pages" bibinfo.check
- *
- }
- if$
- }
- if$
-}
-FUNCTION {format.journal.eid}
-{ eid "eid" bibinfo.check
- duplicate$ empty$ 'pop$
- { swap$ duplicate$ empty$ 'skip$
- {
- ", " *
- }
- if$
- swap$ *
- numpages empty$ 'skip$
- { bbl.eidpp numpages tie.or.space.prefix
- "numpages" bibinfo.check * *
- " (" swap$ * ")" * *
- }
- if$
- }
- if$
-}
-FUNCTION {format.vol.num.pages}
-{ volume field.or.null
- duplicate$ empty$ 'skip$
- {
- "volume" bibinfo.check
- }
- if$
- bolden
- number "number" bibinfo.check duplicate$ empty$ 'skip$
- {
- swap$ duplicate$ empty$
- { "there's a number but no volume in " cite$ * warning$ }
- 'skip$
- if$
- swap$
- "(" swap$ * ")" *
- }
- if$ *
- eid empty$
- { format.journal.pages }
- { format.journal.eid }
- if$
-}
-
-FUNCTION {format.chapter.pages}
-{ chapter empty$
- 'format.pages
- { type empty$
- { bbl.chapter }
- { type "l" change.case$
- "type" bibinfo.check
- }
- if$
- chapter tie.or.space.prefix
- "chapter" bibinfo.check
- * *
- pages empty$
- 'skip$
- { ", " * format.pages * }
- if$
- }
- if$
-}
-
-FUNCTION {bt.enquote}
-{ duplicate$ empty$ 'skip$
- { "\enquote{" swap$ *
- non.stop
- { ",} " * }
- { "}, " * }
- if$
- }
- if$
-}
-FUNCTION {format.booktitle}
-{
- booktitle "booktitle" bibinfo.check
- bt.enquote
-}
-FUNCTION {format.in.ed.booktitle}
-{ format.booktitle duplicate$ empty$ 'skip$
- {
- editor "editor" format.names.ed duplicate$ empty$ 'pop$
- {
- " " *
- get.bbl.editor
- "(" swap$ * "), " *
- * swap$
- * }
- if$
- word.in swap$ *
- }
- if$
-}
-FUNCTION {format.thesis.type}
-{ type duplicate$ empty$
- 'pop$
- { swap$ pop$
- "t" change.case$ "type" bibinfo.check
- }
- if$
-}
-FUNCTION {format.tr.number}
-{ number "number" bibinfo.check
- type duplicate$ empty$
- { pop$ bbl.techrep }
- 'skip$
- if$
- "type" bibinfo.check
- swap$ duplicate$ empty$
- { pop$ "t" change.case$ }
- { tie.or.space.prefix * * }
- if$
-}
-FUNCTION {format.article.crossref}
-{
- word.in
- " \cite{" * crossref * "}" *
-}
-FUNCTION {format.book.crossref}
-{ volume duplicate$ empty$
- { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
- pop$ word.in
- }
- { bbl.volume
- capitalize
- swap$ tie.or.space.prefix "volume" bibinfo.check * * bbl.of space.word *
- }
- if$
- " \cite{" * crossref * "}" *
-}
-FUNCTION {format.incoll.inproc.crossref}
-{
- word.in
- " \cite{" * crossref * "}" *
-}
-FUNCTION {format.org.or.pub}
-{ 't :=
- ""
- address empty$ t empty$ and
- 'skip$
- {
- t empty$
- { address "address" bibinfo.check *
- }
- { t *
- address empty$
- 'skip$
- { ", " * address "address" bibinfo.check * }
- if$
- }
- if$
- }
- if$
-}
-FUNCTION {format.publisher.address}
-{ publisher "publisher" bibinfo.warn format.org.or.pub
-}
-
-FUNCTION {format.organization.address}
-{ organization "organization" bibinfo.check format.org.or.pub
-}
-
-FUNCTION {article}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.title "title" output.check
- new.block
- crossref missing$
- {
- journal
- "journal" bibinfo.check
- emphasize
- "journal" output.check
- format.vol.num.pages output
- }
- { format.article.crossref output.nonnull
- format.pages output
- }
- if$
- format.issn output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-FUNCTION {book}
-{ output.bibitem
- author empty$
- { format.editors "author and editor" output.check
- editor format.key output
- }
- { format.authors output.nonnull
- crossref missing$
- { "author and editor" editor either.or.check }
- 'skip$
- if$
- }
- if$
- format.date "year" output.check
- date.block
- format.btitle "title" output.check
- crossref missing$
- { format.bvolume output
- new.block
- format.number.series output
- new.sentence
- format.publisher.address output
- }
- {
- new.block
- format.book.crossref output.nonnull
- }
- if$
- format.edition output
- format.isbn output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-FUNCTION {booklet}
-{ output.bibitem
- format.authors output
- author format.key output
- format.date "year" output.check
- date.block
- format.title "title" output.check
- new.block
- howpublished "howpublished" bibinfo.check output
- address "address" bibinfo.check output
- format.isbn output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {inbook}
-{ output.bibitem
- author empty$
- { format.editors "author and editor" output.check
- editor format.key output
- }
- { format.authors output.nonnull
- crossref missing$
- { "author and editor" editor either.or.check }
- 'skip$
- if$
- }
- if$
- format.date "year" output.check
- date.block
- format.btitle "title" output.check
- crossref missing$
- {
- format.bvolume output
- format.chapter.pages "chapter and pages" output.check
- new.block
- format.number.series output
- new.sentence
- format.publisher.address output
- }
- {
- format.chapter.pages "chapter and pages" output.check
- new.block
- format.book.crossref output.nonnull
- }
- if$
- format.edition output
- crossref missing$
- { format.isbn output }
- 'skip$
- if$
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {incollection}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.title "title" output.check
- new.block
- crossref missing$
- { format.in.ed.booktitle "booktitle" output.check
- end.quote.btitle
- format.bvolume output
- format.number.series output
- format.chapter.pages output
- new.sentence
- format.publisher.address output
- format.edition output
- format.isbn output
- }
- { format.incoll.inproc.crossref output.nonnull
- format.chapter.pages output
- }
- if$
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-FUNCTION {inproceedings}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.title "title" output.check
- new.block
- crossref missing$
- { format.in.ed.booktitle "booktitle" output.check
- end.quote.btitle
- format.bvolume output
- format.number.series output
- format.pages output
- new.sentence
- publisher empty$
- { format.organization.address output }
- { organization "organization" bibinfo.check output
- format.publisher.address output
- }
- if$
- format.isbn output
- format.issn output
- }
- { format.incoll.inproc.crossref output.nonnull
- format.pages output
- }
- if$
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-FUNCTION {conference} { inproceedings }
-FUNCTION {manual}
-{ output.bibitem
- format.authors output
- author format.key output
- format.date "year" output.check
- date.block
- format.btitle "title" output.check
- organization address new.block.checkb
- organization "organization" bibinfo.check output
- address "address" bibinfo.check output
- format.edition output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {mastersthesis}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.btitle
- "title" output.check
- new.block
- bbl.mthesis format.thesis.type output.nonnull
- school "school" bibinfo.warn output
- address "address" bibinfo.check output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {misc}
-{ output.bibitem
- format.authors output
- author format.key output
- format.date "year" output.check
- date.block
- format.title output
- new.block
- howpublished "howpublished" bibinfo.check output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-FUNCTION {phdthesis}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.btitle
- "title" output.check
- new.block
- bbl.phdthesis format.thesis.type output.nonnull
- school "school" bibinfo.warn output
- address "address" bibinfo.check output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {proceedings}
-{ output.bibitem
- format.editors output
- editor format.key output
- format.date "year" output.check
- date.block
- format.btitle "title" output.check
- format.bvolume output
- format.number.series output
- new.sentence
- publisher empty$
- { format.organization.address output }
- { organization "organization" bibinfo.check output
- format.publisher.address output
- }
- if$
- format.isbn output
- format.issn output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {techreport}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.title
- "title" output.check
- new.block
- format.tr.number emphasize output.nonnull
- institution "institution" bibinfo.warn output
- address "address" bibinfo.check output
- format.doi output
- new.block
- format.note output
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {unpublished}
-{ output.bibitem
- format.authors "author" output.check
- author format.key output
- format.date "year" output.check
- date.block
- format.title "title" output.check
- format.doi output
- new.block
- format.note "note" output.check
- format.eprint output
- format.url output
- fin.entry
-}
-
-FUNCTION {default.type} { misc }
-READ
-FUNCTION {sortify}
-{ purify$
- "l" change.case$
-}
-INTEGERS { len }
-FUNCTION {chop.word}
-{ 's :=
- 'len :=
- s #1 len substring$ =
- { s len #1 + global.max$ substring$ }
- 's
- if$
-}
-FUNCTION {format.lab.names}
-{ 's :=
- "" 't :=
- s #1 "{vv~}{ll}" format.name$
- s num.names$ duplicate$
- #2 >
- { pop$
- " " * bbl.etal emphasize *
- }
- { #2 <
- 'skip$
- { s #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
- {
- " " * bbl.etal emphasize *
- }
- { bbl.and space.word * s #2 "{vv~}{ll}" format.name$
- * }
- if$
- }
- if$
- }
- if$
-}
-
-FUNCTION {author.key.label}
-{ author empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { author format.lab.names }
- if$
-}
-
-FUNCTION {author.editor.key.label}
-{ author empty$
- { editor empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { editor format.lab.names }
- if$
- }
- { author format.lab.names }
- if$
-}
-
-FUNCTION {editor.key.label}
-{ editor empty$
- { key empty$
- { cite$ #1 #3 substring$ }
- 'key
- if$
- }
- { editor format.lab.names }
- if$
-}
-
-FUNCTION {calc.short.authors}
-{ type$ "book" =
- type$ "inbook" =
- or
- 'author.editor.key.label
- { type$ "proceedings" =
- 'editor.key.label
- 'author.key.label
- if$
- }
- if$
- 'short.list :=
-}
-
-FUNCTION {calc.label}
-{ calc.short.authors
- short.list
- "("
- *
- year duplicate$ empty$
- short.list key field.or.null = or
- { pop$ "" }
- 'skip$
- if$
- *
- 'label :=
-}
-
-FUNCTION {sort.format.names}
-{ 's :=
- #1 'nameptr :=
- ""
- s num.names$ 'numnames :=
- numnames 'namesleft :=
- { namesleft #0 > }
- { s nameptr
- "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}"
- format.name$ 't :=
- nameptr #1 >
- {
- " " *
- namesleft #1 = t "others" = and
- { "zzzzz" * }
- { t sortify * }
- if$
- }
- { t sortify * }
- if$
- nameptr #1 + 'nameptr :=
- namesleft #1 - 'namesleft :=
- }
- while$
-}
-
-FUNCTION {sort.format.title}
-{ 't :=
- "A " #2
- "An " #3
- "The " #4 t chop.word
- chop.word
- chop.word
- sortify
- #1 global.max$ substring$
-}
-FUNCTION {author.sort}
-{ author empty$
- { key empty$
- { "to sort, need author or key in " cite$ * warning$
- ""
- }
- { key sortify }
- if$
- }
- { author sort.format.names }
- if$
-}
-FUNCTION {author.editor.sort}
-{ author empty$
- { editor empty$
- { key empty$
- { "to sort, need author, editor, or key in " cite$ * warning$
- ""
- }
- { key sortify }
- if$
- }
- { editor sort.format.names }
- if$
- }
- { author sort.format.names }
- if$
-}
-FUNCTION {editor.sort}
-{ editor empty$
- { key empty$
- { "to sort, need editor or key in " cite$ * warning$
- ""
- }
- { key sortify }
- if$
- }
- { editor sort.format.names }
- if$
-}
-FUNCTION {presort}
-{ calc.label
- label sortify
- " "
- *
- type$ "book" =
- type$ "inbook" =
- or
- 'author.editor.sort
- { type$ "proceedings" =
- 'editor.sort
- 'author.sort
- if$
- }
- if$
- #1 entry.max$ substring$
- 'sort.label :=
- sort.label
- *
- " "
- *
- title field.or.null
- sort.format.title
- *
- #1 entry.max$ substring$
- 'sort.key$ :=
-}
-
-ITERATE {presort}
-SORT
-STRINGS { last.label next.extra }
-INTEGERS { last.extra.num number.label }
-FUNCTION {initialize.extra.label.stuff}
-{ #0 int.to.chr$ 'last.label :=
- "" 'next.extra :=
- #0 'last.extra.num :=
- #0 'number.label :=
-}
-FUNCTION {forward.pass}
-{ last.label label =
- { last.extra.num #1 + 'last.extra.num :=
- last.extra.num int.to.chr$ 'extra.label :=
- }
- { "a" chr.to.int$ 'last.extra.num :=
- "" 'extra.label :=
- label 'last.label :=
- }
- if$
- number.label #1 + 'number.label :=
-}
-FUNCTION {reverse.pass}
-{ next.extra "b" =
- { "a" 'extra.label := }
- 'skip$
- if$
- extra.label 'next.extra :=
- extra.label
- duplicate$ empty$
- 'skip$
- { "{\natexlab{" swap$ * "}}" * }
- if$
- 'extra.label :=
- label extra.label * 'label :=
-}
-EXECUTE {initialize.extra.label.stuff}
-ITERATE {forward.pass}
-REVERSE {reverse.pass}
-FUNCTION {bib.sort.order}
-{ sort.label
- " "
- *
- year field.or.null sortify
- *
- " "
- *
- title field.or.null
- sort.format.title
- *
- #1 entry.max$ substring$
- 'sort.key$ :=
-}
-ITERATE {bib.sort.order}
-SORT
-FUNCTION {begin.bib}
-{ preamble$ empty$
- 'skip$
- { preamble$ write$ newline$ }
- if$
- "\begin{thebibliography}{" number.label int.to.str$ * "}" *
- write$ newline$
- "\newcommand{\enquote}[1]{``#1''}"
- write$ newline$
- "\providecommand{\natexlab}[1]{#1}"
- write$ newline$
- "\providecommand{\url}[1]{\texttt{#1}}"
- write$ newline$
- "\providecommand{\urlprefix}{URL }"
- write$ newline$
- "\expandafter\ifx\csname urlstyle\endcsname\relax"
- write$ newline$
- " \providecommand{\doi}[1]{doi:\discretionary{}{}{}#1}\else"
- write$ newline$
- " \providecommand{\doi}{doi:\discretionary{}{}{}\begingroup \urlstyle{rm}\Url}\fi"
- write$ newline$
- "\providecommand{\eprint}[2][]{\url{#2}}"
- write$ newline$
-}
-EXECUTE {begin.bib}
-EXECUTE {init.state.consts}
-ITERATE {call.type$}
-FUNCTION {end.bib}
-{ newline$
- "\end{thebibliography}" write$ newline$
-}
-EXECUTE {end.bib}
-%% End of customized bst file
-%%
-%% End of file `jss.bst'.
diff --git a/inst/doc/jss.cls b/inst/doc/jss.cls
deleted file mode 100644
index 3aa0b85..0000000
--- a/inst/doc/jss.cls
+++ /dev/null
@@ -1,473 +0,0 @@
-%%
-%% This is file `jss.cls',
-%% generated with the docstrip utility.
-%%
-%% The original source files were:
-%%
-%% jss.dtx (with options: `class')
-%%
-%% IMPORTANT NOTICE:
-%%
-%% For the copyright see the source file.
-%%
-%% Any modified versions of this file must be renamed
-%% with new filenames distinct from jss.cls.
-%%
-%% For distribution of the original source see the terms
-%% for copying and modification in the file jss.dtx.
-%%
-%% This generated file may be distributed as long as the
-%% original source files, as listed above, are part of the
-%% same distribution. (The sources need not necessarily be
-%% in the same archive or directory.)
-\def\fileversion{1.8}
-\def\filename{jss}
-\def\filedate{2008/04/07}
-%%
-%% Package `jss' to use with LaTeX2e for JSS publications
-%% http://www.jstatsoft.org/
-%% Copyright (C) 2004 Achim Zeileis
-%% Please report errors to Achim.Zeileis at R-project.org
-%%
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesClass{jss}[\filedate\space\fileversion\space jss class by Achim Zeileis]
-%% options
-\newif\if at article
-\newif\if at codesnippet
-\newif\if at bookreview
-\newif\if at softwarereview
-\newif\if at review
-\newif\if at shortnames
-\newif\if at nojss
-
-\@articletrue
-\@codesnippetfalse
-\@bookreviewfalse
-\@softwarereviewfalse
-\@reviewfalse
-\@shortnamesfalse
-\@nojssfalse
-
-\DeclareOption{article}{\@articletrue%
- \@codesnippetfalse \@bookreviewfalse \@softwarereviewfalse}
-\DeclareOption{codesnippet}{\@articlefalse%
- \@codesnippettrue \@bookreviewfalse \@softwarereviewfalse}
-\DeclareOption{bookreview}{\@articlefalse%
- \@codesnippetfalse \@bookreviewtrue \@softwarereviewfalse}
-\DeclareOption{softwarereview}{\@articlefalse%
- \@codesnippetfalse \@bookreviewfalse \@softwarereviewtrue}
-\DeclareOption{shortnames}{\@shortnamestrue}
-\DeclareOption{nojss}{\@nojsstrue}
-
-\ProcessOptions
-\LoadClass[11pt,a4paper,twoside]{article}
-%% required packages
-\RequirePackage{graphicx,a4wide,color,ae,fancyvrb}
-\RequirePackage[T1]{fontenc}
-\IfFileExists{upquote.sty}{\RequirePackage{upquote}}{}
-%% bibliography
-\if at shortnames
- \usepackage[authoryear,round]{natbib}
-\else
- \usepackage[authoryear,round,longnamesfirst]{natbib}
-\fi
-\bibpunct{(}{)}{;}{a}{}{,}
-\bibliographystyle{jss}
-%% paragraphs
-\setlength{\parskip}{0.7ex plus0.1ex minus0.1ex}
-\setlength{\parindent}{0em}
-%% for all publications
-\newcommand{\Address}[1]{\def\@Address{#1}}
-\newcommand{\Plaintitle}[1]{\def\@Plaintitle{#1}}
-\newcommand{\Shorttitle}[1]{\def\@Shorttitle{#1}}
-\newcommand{\Plainauthor}[1]{\def\@Plainauthor{#1}}
-\newcommand{\Volume}[1]{\def\@Volume{#1}}
-\newcommand{\Year}[1]{\def\@Year{#1}}
-\newcommand{\Month}[1]{\def\@Month{#1}}
-\newcommand{\Issue}[1]{\def\@Issue{#1}}
-\newcommand{\Submitdate}[1]{\def\@Submitdate{#1}}
-%% for articles and code snippets
-\newcommand{\Acceptdate}[1]{\def\@Acceptdate{#1}}
-\newcommand{\Abstract}[1]{\def\@Abstract{#1}}
-\newcommand{\Keywords}[1]{\def\@Keywords{#1}}
-\newcommand{\Plainkeywords}[1]{\def\@Plainkeywords{#1}}
-%% for book and software reviews
-\newcommand{\Reviewer}[1]{\def\@Reviewer{#1}}
-\newcommand{\Booktitle}[1]{\def\@Booktitle{#1}}
-\newcommand{\Bookauthor}[1]{\def\@Bookauthor{#1}}
-\newcommand{\Publisher}[1]{\def\@Publisher{#1}}
-\newcommand{\Pubaddress}[1]{\def\@Pubaddress{#1}}
-\newcommand{\Pubyear}[1]{\def\@Pubyear{#1}}
-\newcommand{\ISBN}[1]{\def\@ISBN{#1}}
-\newcommand{\Pages}[1]{\def\@Pages{#1}}
-\newcommand{\Price}[1]{\def\@Price{#1}}
-\newcommand{\Plainreviewer}[1]{\def\@Plainreviewer{#1}}
-\newcommand{\Softwaretitle}[1]{\def\@Softwaretitle{#1}}
-\newcommand{\URL}[1]{\def\@URL{#1}}
-%% for internal use
-\newcommand{\Seriesname}[1]{\def\@Seriesname{#1}}
-\newcommand{\Hypersubject}[1]{\def\@Hypersubject{#1}}
-\newcommand{\Hyperauthor}[1]{\def\@Hyperauthor{#1}}
-\newcommand{\Footername}[1]{\def\@Footername{#1}}
-\newcommand{\Firstdate}[1]{\def\@Firstdate{#1}}
-\newcommand{\Seconddate}[1]{\def\@Seconddate{#1}}
-\newcommand{\Reviewauthor}[1]{\def\@Reviewauthor{#1}}
-%% defaults
-\author{Firstname Lastname\\Affiliation}
-\title{Title}
-\Abstract{---!!!---an abstract is required---!!!---}
-\Plainauthor{\@author}
-\Volume{VV}
-\Year{YYYY}
-\Month{MMMMMM}
-\Issue{II}
-\Submitdate{yyyy-mm-dd}
-\Acceptdate{yyyy-mm-dd}
-\Address{
- Firstname Lastname\\
- Affiliation\\
- Address, Country\\
- E-mail: \email{name at address}\\
- URL: \url{http://link/to/webpage/}
-}
-
-\Reviewer{Firstname Lastname\\Affiliation}
-\Plainreviewer{Firstname Lastname}
-\Booktitle{Book Title}
-\Bookauthor{Book Author}
-\Publisher{Publisher}
-\Pubaddress{Publisher's Address}
-\Pubyear{YYY}
-\ISBN{x-xxxxx-xxx-x}
-\Pages{xv + 123}
-\Price{USD 69.95 (P)}
-\URL{http://link/to/webpage/}
-\if at article
- \Seriesname{Issue}
- \Hypersubject{Journal of Statistical Software}
- \Plaintitle{\@title}
- \Shorttitle{\@title}
- \Plainkeywords{\@Keywords}
-\fi
-
-\if at codesnippet
- \Seriesname{Code Snippet}
- \Hypersubject{Journal of Statistical Software -- Code Snippets}
- \Plaintitle{\@title}
- \Shorttitle{\@title}
- \Plainkeywords{\@Keywords}
-\fi
-
-\if at bookreview
- \Seriesname{Book Review}
- \Hypersubject{Journal of Statistical Software -- Book Reviews}
- \Plaintitle{\@Booktitle}
- \Shorttitle{\@Booktitle}
- \Reviewauthor{\@Bookauthor\\
- \@Publisher, \@Pubaddress, \@Pubyear.\\
- ISBN~\@ISBN. \@Pages~pp. \@Price.\\
- \url{\@URL}}
- \Plainkeywords{}
- \@reviewtrue
-\fi
-
-\if at softwarereview
- \Seriesname{Software Review}
- \Hypersubject{Journal of Statistical Software -- Software Reviews}
- \Plaintitle{\@Softwaretitle}
- \Shorttitle{\@Softwaretitle}
- \Booktitle{\@Softwaretitle}
- \Reviewauthor{\@Publisher, \@Pubaddress. \@Price.\\
- \url{\@URL}}
- \Plainkeywords{}
- \@reviewtrue
-\fi
-
-\if at review
- \Hyperauthor{\@Plainreviewer}
- \Keywords{}
- \Footername{Reviewer}
- \Firstdate{\textit{Published:} \@Submitdate}
- \Seconddate{}
-\else
- \Hyperauthor{\@Plainauthor}
- \Keywords{---!!!---at least one keyword is required---!!!---}
- \Footername{Affiliation}
- \Firstdate{\textit{Submitted:} \@Submitdate}
- \Seconddate{\textit{Accepted:} \@Acceptdate}
-\fi
-%% Sweave(-like)
-\DefineVerbatimEnvironment{Sinput}{Verbatim}{fontshape=sl}
-\DefineVerbatimEnvironment{Soutput}{Verbatim}{}
-\DefineVerbatimEnvironment{Scode}{Verbatim}{fontshape=sl}
-%%%% zz commented out 20090609:
-\newenvironment{Schunk}{}{}
-\DefineVerbatimEnvironment{Code}{Verbatim}{}
-\DefineVerbatimEnvironment{CodeInput}{Verbatim}{fontshape=sl}
-\DefineVerbatimEnvironment{CodeOutput}{Verbatim}{}
-\newenvironment{CodeChunk}{}{}
-\setkeys{Gin}{width=0.8\textwidth}
-%% footer
-\newlength{\footerskip}
-\setlength{\footerskip}{2.5\baselineskip plus 2ex minus 0.5ex}
-
-\newcommand{\makefooter}{%
- \vspace{\footerskip}
-
- \if at nojss
- \begin{samepage}
- \textbf{\large \@Footername: \nopagebreak}\\[.3\baselineskip] \nopagebreak
- \@Address \nopagebreak
- \end{samepage}
- \else
- \begin{samepage}
- \textbf{\large \@Footername: \nopagebreak}\\[.3\baselineskip] \nopagebreak
- \@Address \nopagebreak
- \vfill
- \hrule \nopagebreak
- \vspace{.1\baselineskip}
- {\fontfamily{pzc} \fontsize{13}{15} \selectfont Journal of Statistical Software}
- \hfill
- \url{http://www.jstatsoft.org/}\\ \nopagebreak
- published by the American Statistical Association
- \hfill
- \url{http://www.amstat.org/}\\[.3\baselineskip] \nopagebreak
- {Volume~\@Volume, \@Seriesname~\@Issue}
- \hfill
- \@Firstdate\\ \nopagebreak
- {\@Month{} \@Year}
- \hfill
- \@Seconddate \nopagebreak
- \vspace{.3\baselineskip}
- \hrule
- \end{samepage}
- \fi
-}
-\AtEndDocument{\makefooter}
-%% required packages
-\RequirePackage{hyperref}
-%% new \maketitle
-\def\@myoddhead{
- {\color{white} JSS}\\[-1.42cm]
- \hspace{-2em} \includegraphics[height=23mm,keepaspectratio]{jsslogo} \hfill
- \parbox[b][23mm]{118mm}{\hrule height 3pt
- \center{
- {\fontfamily{pzc} \fontsize{28}{32} \selectfont Journal of Statistical Software}
- \vfill
- {\it \small \@Month{} \@Year, Volume~\@Volume, \@Seriesname~\@Issue.%
- \hfill \href{http://www.jstatsoft.org/}{http://www.jstatsoft.org/}}}\\[0.1cm]
- \hrule height 3pt}}
-\if at review
- \renewcommand{\maketitle}{
- \if at nojss
- %% \@oddhead{\@myoddhead}\\[3\baselineskip]
- \else
- \@oddhead{\@myoddhead}\\[3\baselineskip]
- \fi
- {\large
- \noindent
- Reviewer: \@Reviewer
- \vspace{\baselineskip}
- \hrule
- \vspace{\baselineskip}
- \textbf{\@Booktitle}
- \begin{quotation} \noindent
- \@Reviewauthor
- \end{quotation}
- \vspace{0.7\baselineskip}
- \hrule
- \vspace{1.3\baselineskip}
- }
-
- \thispagestyle{empty}
- \if at nojss
- \markboth{\centerline{\@Shorttitle}}{\centerline{\@Hyperauthor}}
- \else
- \markboth{\centerline{\@Shorttitle}}{\centerline{\@Hypersubject}}
- \fi
- \pagestyle{myheadings}
- }
-\else
- \def\maketitle{
- \if at nojss
- %% \@oddhead{\@myoddhead} \par
- \else
- \@oddhead{\@myoddhead} \par
- \fi
- \begingroup
- \def\thefootnote{\fnsymbol{footnote}}
- \def\@makefnmark{\hbox to 0pt{$^{\@thefnmark}$\hss}}
- \long\def\@makefntext##1{\parindent 1em\noindent
- \hbox to1.8em{\hss $\m at th ^{\@thefnmark}$}##1}
- \@maketitle \@thanks
- \endgroup
- \setcounter{footnote}{0}
- \thispagestyle{empty}
- \if at nojss
- \markboth{\centerline{\@Shorttitle}}{\centerline{\@Hyperauthor}}
- \else
- \markboth{\centerline{\@Shorttitle}}{\centerline{\@Hypersubject}}
- \fi
- \pagestyle{myheadings}
-
- \let\maketitle\relax \let\@maketitle\relax
- \gdef\@thanks{}\gdef\@author{}\gdef\@title{}\let\thanks\relax
- }
-
- \def\@maketitle{\vbox{\hsize\textwidth \linewidth\hsize
- \if at nojss
- %% \vskip 1in
- \else
- \vskip 1in
- \fi
- {\centering
- {\LARGE\bf \@title\par}
- \vskip 0.2in plus 1fil minus 0.1in
- {
- \def\and{\unskip\enspace{\rm and}\enspace}%
- \def\And{\end{tabular}\hss \egroup \hskip 1in plus 2fil
- \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}%
- \def\AND{\end{tabular}\hss\egroup \hfil\hfil\egroup
- \vskip 0.1in plus 1fil minus 0.05in
- \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
- \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\ignorespaces}
- \hbox to \linewidth\bgroup\rule{\z@}{10pt} \hfil\hfil
- \hbox to 0pt\bgroup\hss \begin{tabular}[t]{c}\large\bf\rule{\z@}{24pt}\@author
- \end{tabular}\hss\egroup
- \hfil\hfil\egroup}
- \vskip 0.3in minus 0.1in
- \hrule
- \begin{abstract}
- \@Abstract
- \end{abstract}}
- \textit{Keywords}:~\@Keywords.
- \vskip 0.1in minus 0.05in
- \hrule
- \vskip 0.2in minus 0.1in
- }}
-\fi
-%% sections, subsections, and subsubsections
-\newlength{\preXLskip}
-\newlength{\preLskip}
-\newlength{\preMskip}
-\newlength{\preSskip}
-\newlength{\postMskip}
-\newlength{\postSskip}
-\setlength{\preXLskip}{1.8\baselineskip plus 0.5ex minus 0ex}
-\setlength{\preLskip}{1.5\baselineskip plus 0.3ex minus 0ex}
-\setlength{\preMskip}{1\baselineskip plus 0.2ex minus 0ex}
-\setlength{\preSskip}{.8\baselineskip plus 0.2ex minus 0ex}
-\setlength{\postMskip}{.5\baselineskip plus 0ex minus 0.1ex}
-\setlength{\postSskip}{.3\baselineskip plus 0ex minus 0.1ex}
-
-\newcommand{\jsssec}[2][default]{\vskip \preXLskip%
- \pdfbookmark[1]{#1}{Section.\thesection.#1}%
- \refstepcounter{section}%
- \centerline{\textbf{\Large \thesection. #2}} \nopagebreak
- \vskip \postMskip \nopagebreak}
-\newcommand{\jsssecnn}[1]{\vskip \preXLskip%
- \centerline{\textbf{\Large #1}} \nopagebreak
- \vskip \postMskip \nopagebreak}
-
-\newcommand{\jsssubsec}[2][default]{\vskip \preMskip%
- \pdfbookmark[2]{#1}{Subsection.\thesubsection.#1}%
- \refstepcounter{subsection}%
- \textbf{\large \thesubsection. #2} \nopagebreak
- \vskip \postSskip \nopagebreak}
-\newcommand{\jsssubsecnn}[1]{\vskip \preMskip%
- \textbf{\large #1} \nopagebreak
- \vskip \postSskip \nopagebreak}
-
-\newcommand{\jsssubsubsec}[2][default]{\vskip \preSskip%
- \pdfbookmark[3]{#1}{Subsubsection.\thesubsubsection.#1}%
- \refstepcounter{subsubsection}%
- {\large \textit{#2}} \nopagebreak
- \vskip \postSskip \nopagebreak}
-\newcommand{\jsssubsubsecnn}[1]{\vskip \preSskip%
- {\textit{\large #1}} \nopagebreak
- \vskip \postSskip \nopagebreak}
-
-\newcommand{\jsssimplesec}[2][default]{\vskip \preLskip%
-%% \pdfbookmark[1]{#1}{Section.\thesection.#1}%
- \refstepcounter{section}%
- \textbf{\large #1} \nopagebreak
- \vskip \postSskip \nopagebreak}
-\newcommand{\jsssimplesecnn}[1]{\vskip \preLskip%
- \textbf{\large #1} \nopagebreak
- \vskip \postSskip \nopagebreak}
-
-\if at review
- \renewcommand{\section}{\secdef \jsssimplesec \jsssimplesecnn}
- \renewcommand{\subsection}{\secdef \jsssimplesec \jsssimplesecnn}
- \renewcommand{\subsubsection}{\secdef \jsssimplesec \jsssimplesecnn}
-\else
- \renewcommand{\section}{\secdef \jsssec \jsssecnn}
- \renewcommand{\subsection}{\secdef \jsssubsec \jsssubsecnn}
- \renewcommand{\subsubsection}{\secdef \jsssubsubsec \jsssubsubsecnn}
-\fi
-%% colors
-\definecolor{Red}{rgb}{0.5,0,0}
-\definecolor{Blue}{rgb}{0,0,0.5}
-\if at review
- \hypersetup{%
- hyperindex = {true},
- colorlinks = {true},
- linktocpage = {true},
- plainpages = {false},
- linkcolor = {Blue},
- citecolor = {Blue},
- urlcolor = {Red},
- pdfstartview = {Fit},
- pdfpagemode = {None},
- pdfview = {XYZ null null null}
- }
-\else
- \hypersetup{%
- hyperindex = {true},
- colorlinks = {true},
- linktocpage = {true},
- plainpages = {false},
- linkcolor = {Blue},
- citecolor = {Blue},
- urlcolor = {Red},
- pdfstartview = {Fit},
- pdfpagemode = {UseOutlines},
- pdfview = {XYZ null null null}
- }
-\fi
-\if at nojss
- \AtBeginDocument{
- \hypersetup{%
- pdfauthor = {\@Hyperauthor},
- pdftitle = {\@Plaintitle},
- pdfkeywords = {\@Plainkeywords}
- }
- }
-\else
- \AtBeginDocument{
- \hypersetup{%
- pdfauthor = {\@Hyperauthor},
- pdftitle = {\@Plaintitle},
- pdfsubject = {\@Hypersubject},
- pdfkeywords = {\@Plainkeywords}
- }
- }
-\fi
-\AtBeginDocument{\maketitle}
-%% commands
-\makeatletter
-\newcommand\code{\bgroup\@makeother\_\@makeother\~\@makeother\$\@codex}
-\def\@codex#1{{\normalfont\ttfamily\hyphenchar\font=-1 #1}\egroup}
-\makeatother
-%%\let\code=\texttt
-\let\proglang=\textsf
-\newcommand{\pkg}[1]{{\fontseries{b}\selectfont #1}}
-\newcommand{\email}[1]{\href{mailto:#1}{\normalfont\texttt{#1}}}
-\newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{\normalfont\texttt{doi:#1}}}
-\newcommand{\E}{\mathsf{E}}
-\newcommand{\VAR}{\mathsf{VAR}}
-\newcommand{\COV}{\mathsf{COV}}
-\newcommand{\Prob}{\mathsf{P}}
-\endinput
-%%
-%% End of file `jss.cls'.
diff --git a/man/ABO.Rd b/man/ABO.Rd
index f49a2e4..07417a1 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -39,18 +39,21 @@ ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
\code{pB=q},
\code{pO=r}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
+
}
\author{ T. W. Yee }
\note{
@@ -60,6 +63,7 @@ ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
proportions (so each row adds to 1) and the \code{weights}
argument is used to specify the total number of counts for each row.
+
}
\seealso{
@@ -69,13 +73,14 @@ ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
\code{\link{G1G2G3}},
\code{\link{MNSs}}.
+
}
\examples{
-y = cbind(A=725, B=258, AB=72, O=1073) # Order matters, not the name
-fit = vglm(y ~ 1, ABO(link=identity), trace=TRUE, cri="coef")
-coef(fit, matrix=TRUE)
+ymatrix = cbind(A = 725, B = 258, AB = 72, O = 1073) # Order matters, not the name
+fit = vglm(ymatrix ~ 1, ABO(link = identity), trace = TRUE, cri = "coef")
+coef(fit, matrix = TRUE)
Coef(fit) # Estimated pA and pB
-rbind(y, sum(y)*fitted(fit))
+rbind(ymatrix, sum(ymatrix) * fitted(fit))
sqrt(diag(vcov(fit)))
}
\keyword{models}
diff --git a/man/BratUC.Rd b/man/BratUC.Rd
deleted file mode 100644
index d917c2c..0000000
--- a/man/BratUC.Rd
+++ /dev/null
@@ -1,96 +0,0 @@
-\name{Brat}
-\alias{Brat}
-%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ Inputting Data to fit a Bradley Terry Model }
-\description{
- Takes in a square matrix of counts and outputs
- them in a form that is accessible to the \code{\link{brat}}
- and \code{\link{bratt}} family functions.
-
-}
-\usage{
-Brat(mat, ties = 0 * mat, string = c(" > "," == "))
-}
-%- maybe also 'usage' for other objects documented here.
-\arguments{
- \item{mat}{ Matrix of counts,
- which is considered \eqn{M} by \eqn{M} in dimension when
- there are ties, and \eqn{M+1} by \eqn{M+1}
- when there are no ties.
- The rows are winners and the columns are losers, e.g.,
- the 2-1 element is now many times Competitor 2 has beaten
- Competitor 1.
- The matrices are best labelled with the competitors' names.
-
-
-}
- \item{ties}{ Matrix of counts. This should be the same
- dimension as \code{mat}. By default, there are no ties.
- The matrix must be symmetric, and the diagonal should contain
- \code{NA}s.
-
-
-}
- \item{string}{ Character.
- The matrices are labelled with the first value of the descriptor, e.g.,
- \code{"NZ > Oz"} `means' NZ beats Australia in rugby.
- Suggested alternatives include \code{" beats "} or \code{" wins against "}.
- The second value is used to handle ties.
-
-
-}
-}
-\details{
- In the \pkg{VGAM} package it is necessary for each matrix to
- be represented as a single row of data by \code{\link{brat}} and
- \code{\link{bratt}}. Hence the non-diagonal elements of the \eqn{M+1}
- by \eqn{M+1} matrix are concatenated into \eqn{M(M+1)} values (no ties),
- while if there are ties, the non-diagonal elements of the \eqn{M}
- by \eqn{M} matrix are concatenated into \eqn{M(M-1)} values.
-
-
-}
-\value{
- A matrix with 1 row and either \eqn{M(M+1)} or \eqn{M(M-1)} columns.
-
-
-}
-\references{
-
-
-Agresti, A. (2002)
-\emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
-
-
-}
-\author{ T. W. Yee }
-\note{
-
-
-This is a data preprocessing function for
-\code{\link{brat}} and \code{\link{bratt}}.
-
-
-Yet to do: merge \code{InverseBrat} into \code{brat}.
-
-
-}
-\seealso{
-
- \code{\link{brat}},
- \code{\link{bratt}},
- \code{InverseBrat}.
-
-}
-\examples{
-journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
-m = matrix(c( NA, 33, 320, 284, 730, NA, 813, 276,
- 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
-dimnames(m) = list(winner = journal, loser = journal)
-Brat(m)
-vglm(Brat(m) ~ 1, brat, trace = TRUE)
-}
-\keyword{models}
-\keyword{regression}
-
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index cc11c56..0ba90fc 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -78,7 +78,7 @@ New York: Wiley.
plot(x, dinv.gaussian(x, mu = 1, lambda = 1), type = "l",
col = "blue",las = 1, main =
"blue is density, orange is cumulative distribution function")
-abline(h = 0, col = "black", lty = 2)
+abline(h = 0, col = "gray", lty = 2)
lines(x, pinv.gaussian(x, mu = 1, lambda = 1), type = "l", col = "orange") }
}
\keyword{distribution}
diff --git a/man/Qvar.Rd b/man/Qvar.Rd
index 5760eb7..9082588 100644
--- a/man/Qvar.Rd
+++ b/man/Qvar.Rd
@@ -15,9 +15,8 @@ Quasi-variances Preprocessing Function
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
-Qvar(object, factorname = NULL, coef.indices = NULL,
- labels = NULL, dispersion = NULL, reference.name = "(reference)",
- estimates = NULL)
+Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
+ dispersion = NULL, reference.name = "(reference)", estimates = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -37,7 +36,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
If the \code{\link{vglm}} object contains more than one
factor as explanatory variable then this argument should
be the name of the factor of interest.
- If \code{object} is a variance-covariance matrix then
+ If \code{object} is a variance-covariance matrix then
this argument should also be specified.
@@ -84,19 +83,19 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
conventional output. From the complete variance-covariance matrix one
can compute \eqn{L} quasi-variances based on all pairwise difference
of the coefficients. They are based on an approximation, and can be
- treated as uncorrelated. In minimizing the relative (not absolute)
+ treated as uncorrelated. In minimizing the relative (not absolute)
errors it is not hard to see that the estimation involves a RCAM
(\code{\link{rcam}}) with an exponential link function
(\code{\link{explink}}).
If \code{object} is a model, then at least one of \code{factorname} or
- \code{coef.indices} must be non-\code{NULL}. The value of
+ \code{coef.indices} must be non-\code{NULL}. The value of
\code{coef.indices}, if non-\code{NULL}, determines which rows and
- columns of the model's variance-covariance matrix to use. If
+ columns of the model's variance-covariance matrix to use. If
\code{coef.indices} contains a zero, an extra row and column are
included at the indicated position, to represent the zero variances
- and covariances associated with a reference level. If
+ and covariances associated with a reference level. If
\code{coef.indices} is \code{NULL}, then \code{factorname} should be
the name of a factor effect in the model, and is used in order to
extract the necessary variance-covariance estimates.
@@ -135,7 +134,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
\emph{Sociological Methodology} \bold{33}, 1--18.
- Firth, D. and Menezes, R. X. de (2004)
+ Firth, D. and de Menezes, R. X. (2004)
Quasi-variances.
\emph{Biometrika} \bold{91}, 65--80.
@@ -160,9 +159,9 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
It is important to set \code{maxit} to be larger than usual for
- \code{\link{rcam}} since convergence is slow. Upon successful
+ \code{\link{rcam}} since convergence is slow. Upon successful
convergence the \eqn{i}th row effect and the \eqn{i}th column effect
- should be equal. A simple computation involving the fitted and
+ should be equal. A simple computation involving the fitted and
predicted values allows the quasi-variances to be extracted (see
example below).
@@ -175,12 +174,12 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
\section{Warning }{
Negative quasi-variances may occur (one of them and
only one), though they are rare in practice. If
- so then numerical problems may occur. See
+ so then numerical problems may occur. See
\code{qvcalc()} for more information.
-}
+}
\seealso{
@@ -195,10 +194,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL,
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
-library(MASS) # Get the "ships" data frame
-data(ships)
-ships = ships
-detach("package:MASS")
+data("ships", package = "MASS")
Shipmodel <- vglm(incidents ~ type + year + period,
quasipoissonff, offset = log(service),
@@ -214,7 +210,7 @@ fit1 <- rcam(Qvar(Shipmodel, "type"), normal1("explink"), maxit = 99)
# Another form of input
fit2 <- rcam(Qvar(Shipmodel, coef.ind = c(0,2:5), reference.name = "typeA"),
normal1("explink"), maxit = 99)
-\dontrun{ plotqvar(fit2, lcol = "blue", llwd = 2, las = 1) }
+\dontrun{ plotqvar(fit2, col = "orange", lwd = 3, scol = "blue", slwd = 2, las = 1) }
# The variance-covariance matrix is another form of input (not recommended)
fit3 <- rcam(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
diff --git a/man/SurvS4-class.Rd b/man/SurvS4-class.Rd
index ef744d7..3af8361 100644
--- a/man/SurvS4-class.Rd
+++ b/man/SurvS4-class.Rd
@@ -1,7 +1,7 @@
\name{SurvS4-class}
\docType{class}
\alias{SurvS4-class}
-\alias{print,SurvS4-method}
+%%%% 20120216 \alias{print,SurvS4-method}
\alias{show,SurvS4-method}
\title{Class "SurvS4" }
@@ -16,24 +16,30 @@ Class \code{"\linkS4class{structure}"}, by class "matrix", distance 2.
Class \code{"\linkS4class{array}"}, by class "matrix", distance 2.
Class \code{"\linkS4class{vector}"}, by class "matrix", distance 3, with explicit coerce.
Class \code{"\linkS4class{vector}"}, by class "matrix", distance 4, with explicit coerce.
+
}
\section{Methods}{
\describe{
- \item{print}{\code{signature(x = "SurvS4")}: ... }
+% \item{print}{\code{signature(x = "SurvS4")}: ... }
\item{show}{\code{signature(object = "SurvS4")}: ... }
- }
+ }
}
\references{
See \pkg{survival}.
+
+
}
\author{
T. W. Yee.
+
+
}
\note{
The purpose of having \code{\link{SurvS4}} in \pkg{VGAM} is so that
the same input can be fed into \code{\link{vglm}} as functions in
\pkg{survival} such as \code{\link[survival]{survreg}}.
+
}
\section{Warning }{
@@ -44,6 +50,7 @@ Class \code{"\linkS4class{vector}"}, by class "matrix", distance 4, with explici
\seealso{
\code{\link{SurvS4}}.
% or \code{\linkS4class{CLASSNAME}} for links to other classes
+
}
\examples{
showClass("SurvS4")
diff --git a/man/SurvS4.Rd b/man/SurvS4.Rd
index 80f1d55..d65b9cb 100644
--- a/man/SurvS4.Rd
+++ b/man/SurvS4.Rd
@@ -1,7 +1,8 @@
\name{SurvS4}
\alias{SurvS4}
\alias{is.SurvS4}
-\alias{print.SurvS4}
+%%%% 20120216 \alias{print.SurvS4}
+\alias{show.SurvS4}
\alias{Math.SurvS4}
\alias{Summary.SurvS4}
\alias{[.SurvS4}
@@ -14,8 +15,8 @@
Create a Survival Object
}
\description{
- Create a survival object, usually used as a response variable in a
- model formula.
+ Create a survival object, usually used as a response
+ variable in a model formula.
}
\usage{
@@ -26,9 +27,13 @@ is.SurvS4(x)
\item{time}{
for right censored data, this is the follow up time. For interval
data, the first argument is the starting time for the interval.
+
+
}
\item{x}{
any R object.
+
+
}
\item{event}{
The status indicator, normally 0=alive, 1=dead. Other choices are
@@ -37,6 +42,8 @@ is.SurvS4(x)
1=event at \code{time}, 2=left censored, 3=interval censored.
Although unusual, the event indicator can be omitted, in which case
all subjects are assumed to have an event.
+
+
}
\item{time2}{
ending time of the interval for interval censored or counting
@@ -44,6 +51,8 @@ is.SurvS4(x)
closed on the right, \code{(start, end]}. For counting process
data, \code{event} indicates whether an event occurred at the end of
the interval.
+
+
}
\item{type}{
character string specifying the type of censoring. Possible values
@@ -51,21 +60,28 @@ is.SurvS4(x)
\code{"interval"}, or \code{"interval2"}. The default is
\code{"right"} or \code{"counting"} depending on whether the
\code{time2} argument is absent or present, respectively.
+
+
}
\item{origin}{
for counting process data, the hazard function origin. This is most
often used in conjunction with a model containing time dependent
strata in order to align the subjects properly when they cross over
- from one strata to another.}
+ from one strata to another.
+
+
+ }
}
\value{
An object of class \code{SurvS4} (formerly \code{Surv}).
- There are methods for \code{print},
- \code{is.na}, and subscripting survival objects. \code{SurvS4} objects
- are implemented as a matrix of 2 or 3 columns.
+ There are methods for \code{print}, \code{is.na}, and
+ subscripting survival objects. \code{SurvS4} objects are
+ implemented as a matrix of 2 or 3 columns.
+
+ In the case of \code{is.SurvS4}, a logical value
+ \code{TRUE} if \code{x} inherits from class
+ \code{"SurvS4"}, otherwise a \code{FALSE}.
- In the case of \code{is.SurvS4}, a logical value \code{TRUE} if \code{x}
- inherits from class \code{"SurvS4"}, otherwise a \code{FALSE}.
}
\details{
@@ -74,39 +90,42 @@ is.SurvS4(x)
SurvS4(time, event)
SurvS4(time, time2, event, type=, origin=0)
}
-
-In theory it is possible to represent interval censored data without a
-third column containing the explicit status. Exact, right censored,
-left censored and interval censored observation would be represented as
-intervals of (a,a), (a, infinity), (-infinity,b), and (a,b) respectively;
-each specifying the interval within which the event is known to have occurred.
-
-
-If \code{type = "interval2"} then the representation given above is
-assumed, with NA taking the place of infinity. If `type="interval"
-\code{event} must be given.
-If \code{event} is \code{0}, \code{1}, or \code{2}, the relevant
-information is assumed to be contained in \code{time},
-the value in \code{time2}
-is ignored, and the second column of the result will contain a
-placeholder.
-
-
-Presently, the only methods allowing interval censored data are the
-parametric models computed by \code{\link[survival]{survreg}},
-so the distinction between open and closed intervals
-is unimportant.
-The distinction is important for counting process data and
-the Cox model.
-
-
-The function tries to distinguish between the use of 0/1 and 1/2 coding for
-left and right censored data using \code{if (max(status)==2)}.
-If 1/2 coding is used and all the subjects are censored, it will
-guess wrong. Use 0/1 coding in this case.
-}
+In theory it is possible to represent interval censored
+data without a third column containing the explicit status.
+Exact, right censored, left censored and interval censored
+observation would be represented as intervals of (a,a),
+(a, infinity), (-infinity,b), and (a,b) respectively; each
+specifying the interval within which the event is known to
+have occurred.
+
+
+If \code{type = "interval2"} then the representation given
+above is assumed, with NA taking the place of infinity.
+If \code{type = "interval"} then \code{event} must be given.
+If \code{event} is \code{0}, \code{1}, or \code{2},
+the relevant information is assumed to be contained in
+\code{time}, the value in \code{time2} is ignored, and the
+second column of the result will contain a placeholder.
+
+
+Presently, the only methods allowing interval
+censored data are the parametric models computed by
+\code{\link[survival]{survreg}}, so the distinction between
+open and closed intervals is unimportant. The distinction
+is important for counting process data and the Cox model.
+
+
+The function tries to distinguish between the use of 0/1
+and 1/2 coding for left and right censored data using
+\code{if (max(status)==2)}. If 1/2 coding is used and all
+the subjects are censored, it will guess wrong. Use 0/1
+coding in this case.
+
+
+}
+
\author{
The code and documentation comes from \pkg{survival}.
@@ -120,6 +139,7 @@ guess wrong. Use 0/1 coding in this case.
All \pkg{VGAM} family functions beginning with \code{"cen"} require
the packaging function \code{Surv} to format the input.
+
}
\note{
The purpose of having \code{SurvS4} in \pkg{VGAM} is so that
@@ -128,11 +148,13 @@ guess wrong. Use 0/1 coding in this case.
name has been changed from \code{"Surv"} to \code{"SurvS4"}; see
\code{\link{SurvS4-class}}.
+
The format \code{J+} is interpreted in \pkg{VGAM} as \eqn{\ge J}.
If \code{type="interval"} then these should not be used in \pkg{VGAM}:
\code{(L,U-]} or \code{(L,U+]}.
% zz is this for type="count" only?
+
}
\seealso{
@@ -142,6 +164,8 @@ guess wrong. Use 0/1 coding in this case.
% \code{\link[survival]{survfit}},
\code{\link[survival]{survreg}},
\code{\link{leukemia}}.
+
+
}
\examples{
with(leukemia, SurvS4(time, status))
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index d830e1c..e329b84 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -156,12 +156,12 @@ summary(fit)
# Example 2; zero-inflated Poisson model
-zipdat = data.frame(x = runif(nn <- 2000))
-zipdat = transform(zipdat, phi = logit(-0.5 + 1*x, inverse = TRUE),
- lambda = loge( 0.5 + 2*x, inverse = TRUE))
-zipdat = transform(zipdat, y = rzipois(nn, lambda, phi))
-with(zipdat, table(y))
-fit = vglm(y ~ x, zipoisson, zipdat, trace = TRUE)
+zdata = data.frame(x2 = runif(nn <- 2000))
+zdata = transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
+ lambda = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata = transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0))
+with(zdata, table(y))
+fit = vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
coef(fit, matrix = TRUE) # These should agree with the above values
@@ -179,11 +179,11 @@ with(hunua, rug(altitude)) }
# Example 4; LMS quantile regression
-fit = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), dat = bminz,
+fit = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), dat = bmi.nz,
trace = TRUE)
head(predict(fit))
head(fitted(fit))
-head(bminz) # Person 1 is near the lower quartile among people his age
+head(bmi.nz) # Person 1 is near the lower quartile among people his age
head(cdf(fit))
\dontrun{ par(mfrow = c(1, 1), bty = "l", mar = c(5,4,4,3)+0.1, xpd = TRUE)
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index 7abe2b9..c2661e6 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -214,7 +214,7 @@ economics, engineering, and finance},
Boston: Birkhauser.
- Yee, T. W. (2011)
+ Yee, T. W. (2012)
Quantile regression for counts and proportions.
In preparation.
@@ -264,7 +264,7 @@ Boston: Birkhauser.
A second method for solving the noncrossing quantile problem is
illustrated below in Example 3.
This is called the \emph{accumulative quantile method} (AQM)
- and details are in Yee (2011).
+ and details are in Yee (2012).
It does not make the strong parallelism assumption.
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index ad9577d..12ea2fe 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -129,7 +129,7 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
\code{\link{amlpoisson}},
\code{\link{amlbinomial}},
\code{\link{amlexponential}},
- \code{\link{bminz}},
+ \code{\link{bmi.nz}},
\code{\link{alaplace1}},
\code{\link{denorm}},
\code{\link{lms.bcn}} and similar variants are alternative
@@ -139,47 +139,47 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
\examples{
# Example 1
-ooo = with(bminz, order(age))
-bminz = bminz[ooo,] # Sort by age
-(fit = vglm(BMI ~ bs(age), fam=amlnormal(w.aml=0.1), bminz))
+ooo = with(bmi.nz, order(age))
+bmi.nz = bmi.nz[ooo,] # Sort by age
+(fit = vglm(BMI ~ bs(age), fam=amlnormal(w.aml=0.1), bmi.nz))
fit at extra # Gives the w value and the percentile
coef(fit, matrix=TRUE)
\dontrun{
# Quantile plot
-with(bminz, plot(age, BMI, col="blue", main=
+with(bmi.nz, plot(age, BMI, col="blue", main=
paste(round(fit at extra$percentile, dig=1),
"expectile-percentile curve")))
-with(bminz, lines(age, c(fitted(fit)), col="black")) }
+with(bmi.nz, lines(age, c(fitted(fit)), col="black")) }
# Example 2
# Find the w values that give the 25, 50 and 75 percentiles
findw = function(w, percentile=50) {
- fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=w), data=bminz)
+ fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=w), data=bmi.nz)
fit2 at extra$percentile - percentile
}
\dontrun{
# Quantile plot
-with(bminz, plot(age, BMI, col="blue", las=1, main=
+with(bmi.nz, plot(age, BMI, col="blue", las=1, main=
"25, 50 and 75 expectile-percentile curves")) }
for(myp in c(25,50,75)) {
# Note: uniroot() can only find one root at a time
bestw = uniroot(f=findw, interval=c(1/10^4, 10^4), percentile=myp)
- fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=bestw$root), data=bminz)
+ fit2 = vglm(BMI ~ bs(age), fam=amlnormal(w=bestw$root), data=bmi.nz)
\dontrun{
- with(bminz, lines(age, c(fitted(fit2)), col="red")) }
+ with(bmi.nz, lines(age, c(fitted(fit2)), col="red")) }
}
# Example 3; this is Example 1 but with smoothing splines and
# a vector w and a parallelism assumption.
-ooo = with(bminz, order(age))
-bminz = bminz[ooo,] # Sort by age
+ooo = with(bmi.nz, order(age))
+bmi.nz = bmi.nz[ooo,] # Sort by age
fit3 = vgam(BMI ~ s(age, df=4), fam=amlnormal(w=c(.1,1,10), parallel=TRUE),
- bminz, trac=TRUE)
+ bmi.nz, trac=TRUE)
fit3 at extra # The w values, percentiles and weighted deviances
# The linear components of the fit; not for human consumption:
@@ -187,11 +187,11 @@ coef(fit3, matrix=TRUE)
\dontrun{
# Quantile plot
-with(bminz, plot(age, BMI, col="blue", main=
+with(bmi.nz, plot(age, BMI, col="blue", main=
paste(paste(round(fit3 at extra$percentile, dig=1), collapse=", "),
"expectile-percentile curves")))
-with(bminz, matlines(age, fitted(fit3), col=1:fit3 at extra$M, lwd=2))
-with(bminz, lines(age, c(fitted(fit )), col="black")) # For comparison
+with(bmi.nz, matlines(age, fitted(fit3), col=1:fit3 at extra$M, lwd=2))
+with(bmi.nz, lines(age, c(fitted(fit )), col="black")) # For comparison
}
}
\keyword{models}
diff --git a/man/benfUC.Rd b/man/benfUC.Rd
index ead2657..1f55710 100644
--- a/man/benfUC.Rd
+++ b/man/benfUC.Rd
@@ -38,7 +38,7 @@ rbenf(n, ndigits = 1)
}
\item{log, log.p}{
Logical.
- If \code{log.p=TRUE} then all probabilities \code{p} are
+ If \code{log.p = TRUE} then all probabilities \code{p} are
given as \code{log(p)}.
}
@@ -115,16 +115,16 @@ pbenf(x)
\dontrun{
xx = 1:9; # par(mfrow=c(2,1))
-barplot(dbenf(xx), col = "lightblue", las=1, xlab="Leading digit",
+barplot(dbenf(xx), col = "lightblue", las = 1, xlab = "Leading digit",
ylab = "Probability", names.arg = as.character(xx),
- main=paste("Benford's distribution", sep=""))
-
-hist(rbenf(n=1000), border = "blue", prob=TRUE,
- main="1000 random variates from Benford's distribution",
- xlab="Leading digit", sub="Red is the true probability",
- breaks=0:9+0.5, ylim=c(0, 0.35), xlim=c(0, 10.0))
-lines(xx, dbenf(xx), col="red", type="h")
-points(xx, dbenf(xx), col="red")
+ main = paste("Benford's distribution", sep = ""))
+
+hist(rbenf(n = 1000), border = "blue", prob = TRUE,
+ main = "1000 random variates from Benford's distribution",
+     xlab = "Leading digit", sub = "Red is the true probability",
+ breaks = 0:9 + 0.5, ylim = c(0, 0.35), xlim = c(0, 10.0))
+lines(xx, dbenf(xx), col = "red", type = "h")
+points(xx, dbenf(xx), col = "red")
}
}
\keyword{distribution}
diff --git a/man/benini.Rd b/man/benini.Rd
index b789935..2e0e17c 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -69,9 +69,10 @@ benini(y0 = stop("argument 'y0' must be specified"),
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
diff --git a/man/beniniUC.Rd b/man/beniniUC.Rd
index b8e3a14..87bbeed 100644
--- a/man/beniniUC.Rd
+++ b/man/beniniUC.Rd
@@ -41,7 +41,7 @@ rbenini(n, shape, y0)
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 12eb466..75f3b48 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -136,24 +136,24 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
}
\examples{
-betadat = data.frame(y = rbeta(n=1000, shape1=exp(0), shape2=exp(1)))
-fit = vglm(y ~ 1, beta.ab(lshape1="identity", lshape2="identity"),
- data=betadat, trace=TRUE, crit="c")
-fit = vglm(y ~ 1, beta.ab, data=betadat, trace=TRUE, crit="c")
-coef(fit, matrix=TRUE)
+bdata = data.frame(y = rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1)))
+fit = vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
+ data = bdata, trace = TRUE, crit = "coef")
+fit = vglm(y ~ 1, beta.ab, bdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
-betadat = transform(betadat, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
-fit = vglm(Y ~ 1, beta.ab(A=5, B=13), data=betadat, trace=TRUE)
+bdata = transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
+fit = vglm(Y ~ 1, beta.ab(A = 5, B = 13), bdata, trace = TRUE)
Coef(fit)
-c(meanY=with(betadat, mean(Y)), head(fitted(fit),2))
+c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
}
\keyword{models}
\keyword{regression}
% 3/1/06; this works well:
-% fit=vglm(y~1, beta.abqn(link=logoff,earg=list(offset=1)), tr=TRUE, cri="c")
+% fit = vglm(y~1, beta.abqn(link = logoff,earg = list(offset = 1)), tr = TRUE, cri = "c")
% 3/1/06; this does not work so well:
-% it=vglm(y~1, beta.abqn(link=logoff,earg=list(offset=0)), tr=TRUE, cri="c")
+% it = vglm(y~1, beta.abqn(link = logoff,earg = list(offset = 0)), tr = TRUE, cri = "c")
% Interesting!!
diff --git a/man/betaII.Rd b/man/betaII.Rd
index 43d3d6f..72f4f7e 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -7,24 +7,24 @@
beta II distribution.
}
\usage{
-betaII(link.scale = "loge", link.p = "loge", link.q = "loge",
- earg.scale=list(), earg.p=list(), earg.q=list(),
- init.scale = NULL, init.p = 1, init.q = 1, zero = NULL)
+betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+ escale = list(), eshape2.p = list(), eshape3.q = list(),
+ iscale = NULL, ishape2.p = 2, ishape3.q = 2, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.scale, link.p, link.q}{
+ \item{lscale, lshape2.p, lshape3.q}{
Parameter link functions applied to the
(positive) parameters \code{scale}, \code{p} and \code{q}.
See \code{\link{Links}} for more choices.
}
- \item{earg.scale, earg.p, earg.q}{
+ \item{escale, eshape2.p, eshape3.q}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.scale, init.p, init.q}{
+ \item{iscale, ishape2.p, ishape3.q}{
Optional initial values for \code{scale}, \code{p} and \code{q}.
}
@@ -44,6 +44,7 @@ betaII(link.scale = "loge", link.p = "loge", link.q = "loge",
the Lomax (\eqn{p=1}) and inverse Lomax (\eqn{q=1}).
More details can be found in Kleiber and Kotz (2003).
+
The beta II distribution has density
\deqn{f(y) = y^{p-1} / [b^p B(p,q) \{1 + y/b\}^{p+q}]}{%
f(y) = y^(p-1) / [b^p B(p,q) (1 + y/b)^(p+q)]}
@@ -53,18 +54,20 @@ and the others are shape parameters.
The mean is
\deqn{E(Y) = b \, \Gamma(p + 1) \, \Gamma(q - 1) / (\Gamma(p) \, \Gamma(q))}{%
E(Y) = b gamma(p + 1) gamma(q - 1) / ( gamma(p) gamma(q))}
-provided \eqn{q > 1}.
+provided \eqn{q > 1}; these are returned as the fitted values.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
@@ -91,10 +94,11 @@ Hoboken, NJ: Wiley-Interscience.
}
\examples{
-bdata = data.frame(y = rsinmad(n=2000, a=1, 6, 2)) # Not genuine data!
-fit = vglm(y ~ 1, betaII, bdata, trace=TRUE)
-fit = vglm(y ~ 1, betaII(init.p=0.7, init.q=0.7), bdata, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+bdata = data.frame(y = rsinmad(2000, shape1.a = 1, 6, 2)) # Not genuine data!
+fit = vglm(y ~ 1, betaII, bdata, trace = TRUE)
+fit = vglm(y ~ 1, betaII(ishape2.p = 0.7, ishape3.q = 0.7),
+ bdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/betabinomial.ab.Rd b/man/betabinomial.ab.Rd
index 57de43b..3976150 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomial.ab.Rd
@@ -109,10 +109,10 @@ betabinomial.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
Suppose \code{fit} is a fitted beta-binomial model. Then
- \code{fit at y} contains the sample proportions \eqn{y},
- \code{fitted(fit)} returns estimates of \eqn{E(Y)}, and
- \code{weights(fit, type = "prior")} returns the number
- of trials \eqn{N}.
+ \code{fit at y} (better: \code{depvar(fit)}) contains the sample
+ proportions \eqn{y}, \code{fitted(fit)} returns estimates of
+ \eqn{E(Y)}, and \code{weights(fit, type = "prior")} returns
+ the number of trials \eqn{N}.
}
@@ -192,7 +192,7 @@ fit = vglm(cbind(y, N-y) ~ 1, betabinomial.ab, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fit at misc$rho) # The correlation parameter
-head(cbind(fit at y, weights(fit, type = "prior")))
+head(cbind(depvar(fit), weights(fit, type = "prior")))
# Example 2
@@ -202,7 +202,7 @@ coef(fit, matrix = TRUE)
Coef(fit)
fit at misc$rho # The correlation parameter
t(fitted(fit))
-t(fit at y)
+t(depvar(fit))
t(weights(fit, type = "prior"))
# A "loge" link for the 2 shape parameters is a logistic regression:
all.equal(c(fitted(fit)),
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
index bf87117..6a22731 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic4.Rd
@@ -71,11 +71,12 @@ bilogistic4(llocation = "identity", lscale = "loge",
P(Y1 <= y1) = F(y1;l1,s1) = 1 / (1 + exp[-(y1-l1)/s1]).
}
+
By default, \eqn{\eta_1=l_1}{eta1=l1},
\eqn{\eta_2=\log(s_1)}{eta2=log(s1)},
\eqn{\eta_3=l_2}{eta3=l2},
- \eqn{\eta_4=\log(s_2)}{eta4=log(s2)} are the linear/additive
- predictors.
+ \eqn{\eta_4=\log(s_2)}{eta4=log(s2)} are the linear/additive predictors.
+
}
\value{
@@ -83,17 +84,21 @@ bilogistic4(llocation = "identity", lscale = "loge",
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}} and \code{\link{vgam}}.
+
}
\references{
+
Gumbel, E. J. (1961)
Bivariate logistic distributions.
\emph{Journal of the American Statistical Association},
\bold{56}, 335--349.
+
Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
@@ -104,6 +109,7 @@ Hoboken, N.J.: Wiley-Interscience.
therefore treated with caution; these are computed in functions such
as \code{vcov()} and \code{summary()}.
+
}
%\section{Warning }{
%}
@@ -111,6 +117,8 @@ Hoboken, N.J.: Wiley-Interscience.
\seealso{
\code{\link{logistic}},
\code{\link{rbilogis4}}.
+
+
}
\examples{
ymat = rbilogis4(n <- 1000, loc1 = 5, loc2 = 7, scale2 = exp(1))
@@ -120,7 +128,7 @@ coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
vcov(fit)
-head(weights(fit, type = "w"))
+head(weights(fit, type = "work"))
summary(fit)
}
\keyword{models}
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index 96ce79b..fd86249 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -3,7 +3,8 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bivariate Binary Regression with an Odds Ratio (Family Function) }
\description{
- Fits a Palmgren (bivariate logistic regression) model to two binary
+ Fits a Palmgren (bivariate odds-ratio model, or
+ bivariate logistic regression) model to two binary
responses. Actually, a bivariate logistic/probit/cloglog/cauchit
model can be fitted.
The odds ratio is used as a measure of dependency.
@@ -11,9 +12,9 @@
}
\usage{
binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
- emu=list(), emu1=emu, emu2=emu, eoratio=list(),
- imu1=NULL, imu2=NULL, ioratio = NULL, zero = 3,
- exchangeable = FALSE, tol = 0.001, morerobust=FALSE)
+ emu = list(), emu1 = emu, emu2 = emu, eoratio = list(),
+ imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = 3,
+ exchangeable = FALSE, tol = 0.001, morerobust = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -93,7 +94,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
i.e., an intercept-only model, but this can be circumvented by setting
\code{zero = NULL} in order to model the odds ratio as a function of all the
explanatory variables.
- The function \code{binom2.or} can handle other probability link
+ The function \code{binom2.or()} can handle other probability link
functions such as \code{\link{probit}},
\code{\link{cloglog}} and \code{\link{cauchit}} links as well, so is
quite general. In fact, the two marginal probabilities can each have
@@ -153,6 +154,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
}
\author{ Thomas W. Yee }
\note{
+ At present we call \code{\link{binom2.or}} families a
+ \emph{bivariate odds-ratio model}.
The response should be either a 4-column matrix of counts
(whose columns correspond to \eqn{(Y_1,Y_2)}{(Y1,Y2)} = (0,0), (0,1), (1,0),
(1,1) respectively), or a two-column matrix where each column has two
@@ -162,8 +165,8 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
possible outcomes.
- By default, a constant odds ratio is fitted because \code{zero=3}.
- Set \code{zero=NULL} if you want the odds ratio to be modelled as a
+ By default, a constant odds ratio is fitted because \code{zero = 3}.
+ Set \code{zero = NULL} if you want the odds ratio to be modelled as a
function of the explanatory variables; however, numerical problems
are more likely to occur.
@@ -174,7 +177,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
Users who want a different link function for each of the two marginal
probabilities should use the \code{lmu1} and \code{lmu2} arguments,
and the argument \code{lmu} is then ignored. It doesn't make sense
- to specify \code{exchangeable=TRUE} and have different link functions
+ to specify \code{exchangeable = TRUE} and have different link functions
for the two marginal probabilities.
@@ -184,6 +187,7 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
to be entered into an exchangeable \code{\link{binom2.or}} model.
See the author's webpage for sample code.
+
}
\seealso{
\code{\link{rbinom2.or}},
@@ -197,24 +201,46 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
\code{\link{cloglog}},
\code{\link{cauchit}}.
+
}
\examples{
# Fit the model in Table 6.7 in McCullagh and Nelder (1989)
coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero=NULL), coalminers)
+fit = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or(zero = NULL), coalminers)
fitted(fit)
summary(fit)
-coef(fit, matrix=TRUE)
-c(weights(fit, type="prior")) * fitted(fit) # Table 6.8
+coef(fit, matrix = TRUE)
+c(weights(fit, type = "prior")) * fitted(fit) # Table 6.8
-\dontrun{ with(coalminers, matplot(Age, fitted(fit), type="l", las=1,
- xlab="(age - 42) / 5", lwd=2))
+\dontrun{ with(coalminers, matplot(Age, fitted(fit), type = "l", las = 1,
+ xlab = "(age - 42) / 5", lwd = 2))
with(coalminers, matpoints(Age, fit at y, col=1:4))
-legend(x=-4, y=0.5, lty=1:4, col=1:4, lwd=2,
+legend(x = -4, y = 0.5, lty = 1:4, col = 1:4, lwd = 2,
legend=c("1 = (Breathlessness=0, Wheeze=0)",
"2 = (Breathlessness=0, Wheeze=1)",
"3 = (Breathlessness=1, Wheeze=0)",
"4 = (Breathlessness=1, Wheeze=1)")) }
+
+
+# Another model: pet ownership
+petdata = subset(xs.nz, ethnic == "0" & age < 70 & sex == "M") # More homogeneous
+petdata = na.omit(petdata[, c("cat", "dog", "age")])
+summary(petdata)
+with(petdata, table(cat, dog)) # Can compute the odds ratio
+
+fit = vgam(cbind((1-cat)*(1-dog), (1-cat)*dog,
+ cat*(1-dog), cat*dog) ~ s(age, df = 5),
+ binom2.or(zero = 3), data = petdata, trace = TRUE)
+colSums(depvar(fit))
+coef(fit, matrix = TRUE)
+
+\dontrun{ # Plot the estimated probabilities
+ooo = order(with(petdata, age))
+matplot(with(petdata, age)[ooo], fitted(fit)[ooo, ], type = "l",
+ xlab = "Age", ylab = "Probability", main = "Pet ownership",
+ ylim = c(0, max(fitted(fit))), las = 1, lwd = 1.5)
+legend("topleft", col=1:4, lty = 1:4, leg = c("no cat or dog ",
+ "dog only", "cat only", "cat and dog"), lwd = 1.5) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index b34eda7..0d714b3 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -113,7 +113,7 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
Altman, M. and Gill, J. and McDonald, M. P. (2004)
\emph{Numerical Issues in Statistical Computing for the Social
- Scientist}, Hoboken, NJ: Wiley-Interscience.
+ Scientist}, Hoboken, NJ, USA: Wiley-Interscience.
Ridout, M. S. (1990)
@@ -223,7 +223,7 @@ with(shunua, matplot(altitude, fitted(fit2), type = "l",
ridout = data.frame(v = c(1000, 100, 10), r = c(4, 3, 3), n = c(5, 5, 5))
(ridout = transform(ridout, logv = log(v)))
# The iterations oscillates between two local solutions:
-glm.fail = glm(r/n ~ offset(logv) + 1, weight = n,
+glm.fail = glm(r / n ~ offset(logv) + 1, weight = n,
binomial(link = cloglog), ridout, trace = TRUE)
coef(glm.fail)
# vglm()'s half-stepping ensures the MLE of -5.4007 is obtained:
diff --git a/man/bivgamma.mckay.Rd b/man/bivgamma.mckay.Rd
index 27e423a..2e0a988 100644
--- a/man/bivgamma.mckay.Rd
+++ b/man/bivgamma.mckay.Rd
@@ -124,7 +124,7 @@ coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
-colMeans(fit at y) # Check moments
+colMeans(depvar(fit)) # Check moments
head(fitted(fit), 1)
}
\keyword{models}
diff --git a/man/bminz.Rd b/man/bmi.nz.Rd
similarity index 78%
rename from man/bminz.Rd
rename to man/bmi.nz.Rd
index efc165f..11e5464 100644
--- a/man/bminz.Rd
+++ b/man/bmi.nz.Rd
@@ -1,12 +1,13 @@
-\name{bminz}
-\alias{bminz}
+\name{bmi.nz}
+\alias{bmi.nz}
\docType{data}
\title{ Body Mass Index of New Zealand Adults Data}
\description{
The body mass indexes and ages from an approximate random
sample of 700 New Zealand adults.
+
}
-\usage{data(bminz)}
+\usage{data(bmi.nz)}
\format{
A data frame with 700 observations on the following 2 variables.
\describe{
@@ -20,14 +21,22 @@
They are a random sample from the Fletcher Challenge/Auckland Heart and
Health survey conducted in the early 1990s.
+
There are some outliers in the data set.
+
A variable \code{gender} would be useful, and may be added later.
+
+
}
\source{
- Clinical Trials Research Unit, University of Auckland, New Zealand.
+ Clinical Trials Research Unit, University of Auckland, New Zealand,
+ \url{http://www.ctru.auckland.ac.nz}.
+
+
}
\references{
+
MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
Fletcher Challenge-University of Auckland Heart &
@@ -35,12 +44,11 @@ Health Study: design and baseline findings.
\emph{New Zealand Medical Journal},
\bold{108}, 499--502.
+
}
\examples{
-\dontrun{
-with(bminz, plot(age, BMI, col="blue"))
-fit = vgam(BMI ~ s(age, df=c(2,4,2)), fam=lms.yjn, bminz, tr=TRUE)
-qtplot(fit, pcol="blue", tcol="brown", lcol="brown")
-}
+\dontrun{ with(bmi.nz, plot(age, BMI, col = "blue"))
+fit = vgam(BMI ~ s(age, df = c(2, 4, 2)), fam = lms.yjn, bmi.nz, trace = TRUE)
+qtplot(fit, pcol = "blue", tcol = "brown", lcol = "brown") }
}
\keyword{datasets}
diff --git a/man/brat.Rd b/man/brat.Rd
index 769d7dc..f82fe84 100644
--- a/man/brat.Rd
+++ b/man/brat.Rd
@@ -12,113 +12,143 @@ brat(refgp = "last", refvalue = 1, init.alpha = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{refgp}{ Integer whose value must be from the set
- \{1,\ldots,\eqn{M+1}\},
- where there are \eqn{M+1} competitors. The default value indicates
- the last competitor is used---but don't input a character string, in general.
+ \item{refgp}{
+ Integer whose value must be from the set
+ \{1,\ldots,\eqn{M+1}\}, where there are \eqn{M+1}
+ competitors. The default value indicates the last
+ competitor is used---but don't input a character string,
+ in general.
+
+
+ }
+ \item{refvalue}{
+ Numeric. A positive value for the reference group.
+
+
}
- \item{refvalue}{ Numeric. A positive value for the reference group. }
- \item{init.alpha}{ Initial values for the \eqn{\alpha}{alpha}s.
+ \item{init.alpha}{
+ Initial values for the \eqn{\alpha}{alpha}s.
These are recycled to the appropriate length.
+
}
}
\details{
- The Bradley Terry model involves \eqn{M+1} competitors who
- either win or lose against each other (no draws/ties allowed in
- this implementation--see \code{\link{bratt}} if there are ties).
- The probability that Competitor \eqn{i} beats Competitor \eqn{j}
- is \eqn{\alpha_i / (\alpha_i+\alpha_j)}{alpha_i / (alpha_i +
- alpha_j)}, where all the \eqn{\alpha}{alpha}s are positive. Loosely,
- the \eqn{\alpha}{alpha}s can be thought of as the competitors'
- `abilities'. For identifiability, one of the \eqn{\alpha_i}{alpha_i}
- is set to a known value \code{refvalue}, e.g., 1. By default, this
- function chooses the last competitor to have this reference value.
- The data can be represented in the form of a \eqn{M+1} by \eqn{M+1}
- matrix of counts, where winners are the rows and losers are the columns.
- However, this is not the way the data should be inputted (see below).
-
- Excluding the reference value/group, this function chooses
- \eqn{\log(\alpha_j)}{log(alpha_j)} as the \eqn{M} linear predictors.
- The log link ensures that the \eqn{\alpha}{alpha}s are positive.
-
- The Bradley Terry model can be fitted by logistic regression, but this
- approach is not taken here. The Bradley Terry model can be fitted
- with covariates, e.g., a home advantage variable, but unfortunately,
- this lies outside the VGLM theoretical framework and therefore cannot
- be handled with this code.
+ The Bradley Terry model involves \eqn{M+1} competitors
+ who either win or lose against each other (no draws/ties
+ allowed in this implementation--see \code{\link{bratt}}
+ if there are ties). The probability that Competitor
+ \eqn{i} beats Competitor \eqn{j} is \eqn{\alpha_i /
+ (\alpha_i+\alpha_j)}{alpha_i / (alpha_i + alpha_j)},
+ where all the \eqn{\alpha}{alpha}s are positive.
+ Loosely, the \eqn{\alpha}{alpha}s can be thought of as
+ the competitors' `abilities'. For identifiability, one
+ of the \eqn{\alpha_i}{alpha_i} is set to a known value
+ \code{refvalue}, e.g., 1. By default, this function
+ chooses the last competitor to have this reference value.
+ The data can be represented in the form of a \eqn{M+1}
+ by \eqn{M+1} matrix of counts, where winners are the
+ rows and losers are the columns. However, this is not
+ the way the data should be inputted (see below).
+
+
+ Excluding the reference value/group, this function
+ chooses \eqn{\log(\alpha_j)}{log(alpha_j)} as the
+ \eqn{M} linear predictors. The log link ensures that
+ the \eqn{\alpha}{alpha}s are positive.
+
+
+ The Bradley Terry model can be fitted by logistic
+ regression, but this approach is not taken here.
+ The Bradley Terry model can be fitted with covariates,
+ e.g., a home advantage variable, but unfortunately, this
+ lies outside the VGLM theoretical framework and therefore
+ cannot be handled with this code.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
}
\references{
Agresti, A. (2002)
\emph{Categorical Data Analysis},
2nd ed. New York: Wiley.
+
Stigler, S. (1994)
Citation patterns in the journals of statistics and probability.
\emph{Statistical Science},
\bold{9}, 94--108.
+
The \pkg{BradleyTerry2} package has more comprehensive capabilities
than this function.
+
}
\author{ T. W. Yee }
\note{
- The function \code{\link{Brat}} is useful for coercing a \eqn{M+1}
- by \eqn{M+1} matrix of counts into a one-row matrix suitable
- for \code{brat}. Diagonal elements are skipped, and the usual S
- order of \code{c(a.matrix)} of elements is used. There should be no
- missing values apart from the diagonal elements of the square matrix.
- The matrix should have winners as the rows, and losers as the columns.
- In general, the response should be a 1-row matrix with \eqn{M(M+1)}
- columns.
-
- Only an intercept model is recommended with \code{brat}. It doesn't
- make sense really to include covariates because of the limited VGLM
- framework.
+ The function \code{\link{Brat}} is useful for coercing
+ a \eqn{M+1} by \eqn{M+1} matrix of counts into a one-row
+ matrix suitable for \code{brat}. Diagonal elements are
+ skipped, and the usual S order of \code{c(a.matrix)}
+ of elements is used. There should be no missing values
+ apart from the diagonal elements of the square matrix.
+ The matrix should have winners as the rows, and losers
+ as the columns. In general, the response should be a
+ 1-row matrix with \eqn{M(M+1)} columns.
+
+
+ Only an intercept model is recommended with \code{brat}.
+ It doesn't make sense really to include covariates because
+ of the limited VGLM framework.
+
Notationally, note that the \pkg{VGAM} family function
- \code{\link{brat}} has \eqn{M+1} contestants, while \code{bratt}
- has \eqn{M} contestants.
+ \code{\link{brat}} has \eqn{M+1} contestants, while
+ \code{bratt} has \eqn{M} contestants.
+
}
\section{Warning }{
- Presently, the residuals are wrong, and the prior weights are not
- handled correctly. Ideally, the total number of counts should
- be the prior weights, after the response has been converted to
- proportions. This would make it similar to family functions such as
- \code{\link{multinomial}} and \code{\link{binomialff}}.
+ Presently, the residuals are wrong, and the prior weights
+ are not handled correctly. Ideally, the total number of
+ counts should be the prior weights, after the response has
+ been converted to proportions. This would make it similar
+ to family functions such as \code{\link{multinomial}}
+ and \code{\link{binomialff}}.
+
}
\seealso{
-\code{\link{bratt}},
-\code{\link{Brat}},
-\code{\link{multinomial}},
-\code{\link{binomialff}}.
+ \code{\link{bratt}},
+ \code{\link{Brat}},
+ \code{\link{multinomial}},
+ \code{\link{binomialff}}.
+
+
}
\examples{
-# citation statistics: being cited is a 'win'; citing is a 'loss'
+# Citation statistics: being cited is a 'win'; citing is a 'loss'
journal = c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-m = matrix(c( NA, 33, 320, 284,
+mat = matrix(c( NA, 33, 320, 284,
730, NA, 813, 276,
498, 68, NA, 325,
221, 17, 142, NA), 4,4)
-dimnames(m) = list(winner = journal, loser = journal)
-fit = vglm(Brat(m) ~ 1, brat(refgp=1), trace=TRUE)
-fit = vglm(Brat(m) ~ 1, brat(refgp=1), trace=TRUE, cri="c")
+dimnames(mat) = list(winner = journal, loser = journal)
+fit = vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE)
+fit = vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE, crit = "coef")
summary(fit)
-c(0, coef(fit)) # log-abilities (in order of "journal")
-c(1, Coef(fit)) # abilities (in order of "journal")
-fitted(fit) # probabilities of winning in awkward form
-(check = InverseBrat(fitted(fit))) # probabilities of winning
+c(0, coef(fit)) # Log-abilities (in order of "journal")
+c(1, Coef(fit)) # Abilities (in order of "journal")
+fitted(fit) # Probabilities of winning in awkward form
+(check = InverseBrat(fitted(fit))) # Probabilities of winning
check + t(check) # Should be 1's in the off-diagonals
}
\keyword{models}
diff --git a/man/bratUC.Rd b/man/bratUC.Rd
new file mode 100644
index 0000000..88396ec
--- /dev/null
+++ b/man/bratUC.Rd
@@ -0,0 +1,100 @@
+\name{Brat}
+\alias{Brat}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Inputting Data to fit a Bradley Terry Model }
+\description{
+ Takes in a square matrix of counts and outputs
+ them in a form that is accessible to the \code{\link{brat}}
+ and \code{\link{bratt}} family functions.
+
+}
+\usage{
+Brat(mat, ties = 0 * mat, string = c(" > "," == "))
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{mat}{
+ Matrix of counts, which is considered \eqn{M} by \eqn{M} in
+ dimension when there are ties, and \eqn{M+1} by \eqn{M+1}
+ when there are no ties. The rows are winners and the
+    columns are losers, e.g., the 2-1 element is how many
+ times Competitor 2 has beaten Competitor 1. The matrices
+ are best labelled with the competitors' names.
+
+
+}
+ \item{ties}{
+ Matrix of counts.
+ This should be the same dimension as \code{mat}. By
+ default, there are no ties. The matrix must be symmetric,
+ and the diagonal should contain \code{NA}s.
+
+
+}
+ \item{string}{
+ Character.
+ The matrices are labelled with the first value of the
+ descriptor, e.g., \code{"NZ > Oz"} `means' NZ beats
+ Australia in rugby. Suggested alternatives include \code{"
+ beats "} or \code{" wins against "}. The second value
+ is used to handle ties.
+
+
+}
+}
+\details{
+ In the \pkg{VGAM} package it is necessary for each
+ matrix to be represented as a single row of data by
+ \code{\link{brat}} and \code{\link{bratt}}. Hence the
+ non-diagonal elements of the \eqn{M+1} by \eqn{M+1}
+ matrix are concatenated into \eqn{M(M+1)} values (no
+ ties), while if there are ties, the non-diagonal elements
+ of the \eqn{M} by \eqn{M} matrix are concatenated into
+ \eqn{M(M-1)} values.
+
+
+}
+\value{
+ A matrix with 1 row and either \eqn{M(M+1)} or \eqn{M(M-1)}
+ columns.
+
+
+}
+\references{
+
+
+Agresti, A. (2002)
+\emph{Categorical Data Analysis},
+2nd ed. New York: Wiley.
+
+
+}
+\author{ T. W. Yee }
+\note{
+
+ This is a data preprocessing function for
+ \code{\link{brat}} and \code{\link{bratt}}.
+
+
+ Yet to do: merge \code{InverseBrat} into \code{brat}.
+
+
+}
+\seealso{
+ \code{\link{brat}},
+ \code{\link{bratt}},
+ \code{InverseBrat}.
+
+
+}
+\examples{
+journal = c("Biometrika", "Comm Statist", "JASA", "JRSS-B")
+mat = matrix(c( NA, 33, 320, 284, 730, NA, 813, 276,
+ 498, 68, NA, 325, 221, 17, 142, NA), 4, 4)
+dimnames(mat) = list(winner = journal, loser = journal)
+Brat(mat)
+vglm(Brat(mat) ~ 1, brat, trace = TRUE)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/bratt.Rd b/man/bratt.Rd
index d9f7bde..90b229f 100644
--- a/man/bratt.Rd
+++ b/man/bratt.Rd
@@ -3,59 +3,86 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Bradley Terry Model With Ties }
\description{
- Fits a Bradley Terry model with ties (intercept-only model) by maximum
-likelihood estimation.
+ Fits a Bradley Terry model with ties (intercept-only model)
+ by maximum likelihood estimation.
+
}
\usage{
bratt(refgp = "last", refvalue = 1, init.alpha = 1, i0 = 0.01)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{refgp}{ Integer whose value must be from the set
- \{1,\ldots,\eqn{M}\},
- where there are \eqn{M} competitors. The default value indicates
- the last competitor is used---but don't input a character string, in general.
+ \item{refgp}{
+ Integer whose value must be from the set \{1,\ldots,\eqn{M}\},
+ where there are \eqn{M} competitors. The default value
+ indicates the last competitor is used---but don't input
+ a character string, in general.
+
+
+ }
+ \item{refvalue}{
+ Numeric. A positive value for the reference group.
+
+
+ }
+ \item{init.alpha}{
+ Initial values for the \eqn{\alpha}{alpha}s.
+ These are recycled to the appropriate length.
+
+
+ }
+ \item{i0}{
+ Initial value for \eqn{\alpha_0}{alpha_0}.
+ If convergence fails, try another positive value.
+
+
}
- \item{refvalue}{ Numeric. A positive value for the reference group. }
- \item{init.alpha}{ Initial values for the \eqn{\alpha}{alpha}s.
- These are recycled to the appropriate length. }
- \item{i0}{ Initial value for \eqn{\alpha_0}{alpha_0}.
- If convergence fails, try another positive value. }
}
\details{
- There are several models that extend the ordinary Bradley Terry model
- to handle ties. This family function implements one of these models.
- It involves \eqn{M} competitors who either win or lose or tie against
- each other. (If there are no draws/ties then use \code{\link{brat}}).
- The probability that Competitor \eqn{i} beats Competitor \eqn{j} is
- \eqn{\alpha_i / (\alpha_i+\alpha_j+\alpha_0)}{alpha_i / (alpha_i +
- alpha_j + alpha_0)}, where all the \eqn{\alpha}{alpha}s are positive.
- The probability that Competitor \eqn{i} ties with Competitor \eqn{j}
- is \eqn{\alpha_0 / (\alpha_i+\alpha_j+\alpha_0)}{alpha_0 / (alpha_i +
- alpha_j + alpha_0)}. Loosely, the \eqn{\alpha}{alpha}s can be thought
- of as the competitors' `abilities', and \eqn{\alpha_0}{alpha_0}
- is an added parameter to model ties. For identifiability, one of
- the \eqn{\alpha_i}{alpha_i} is set to a known value \code{refvalue},
- e.g., 1. By default, this function chooses the last competitor to
- have this reference value. The data can be represented in the form of
- a \eqn{M} by \eqn{M} matrix of counts, where winners are the rows and
- losers are the columns. However, this is not the way the data should
- be inputted (see below).
+ There are several models that extend the ordinary
+ Bradley Terry model to handle ties. This family function
+ implements one of these models. It involves \eqn{M}
+ competitors who either win or lose or tie against
+ each other. (If there are no draws/ties then use
+ \code{\link{brat}}). The probability that Competitor
+ \eqn{i} beats Competitor \eqn{j} is \eqn{\alpha_i /
+ (\alpha_i+\alpha_j+\alpha_0)}{alpha_i / (alpha_i +
+ alpha_j + alpha_0)}, where all the \eqn{\alpha}{alpha}s
+ are positive. The probability that Competitor \eqn{i}
+ ties with Competitor \eqn{j} is \eqn{\alpha_0 /
+ (\alpha_i+\alpha_j+\alpha_0)}{alpha_0 / (alpha_i +
+ alpha_j + alpha_0)}. Loosely, the \eqn{\alpha}{alpha}s
+ can be thought of as the competitors' `abilities',
+ and \eqn{\alpha_0}{alpha_0} is an added parameter
+ to model ties. For identifiability, one of the
+ \eqn{\alpha_i}{alpha_i} is set to a known value
+ \code{refvalue}, e.g., 1. By default, this function
+ chooses the last competitor to have this reference value.
+ The data can be represented in the form of a \eqn{M}
+ by \eqn{M} matrix of counts, where winners are the rows
+ and losers are the columns. However, this is not the
+ way the data should be inputted (see below).
+
Excluding the reference value/group, this function
chooses \eqn{\log(\alpha_j)}{log(alpha_j)} as the first
- \eqn{M-1} linear predictors. The log link ensures that the
- \eqn{\alpha}{alpha}s are positive. The last linear predictor is
- \eqn{\log(\alpha_0)}{log(alpha_0)}.
+ \eqn{M-1} linear predictors. The log link ensures that
+ the \eqn{\alpha}{alpha}s are positive. The last linear
+ predictor is \eqn{\log(\alpha_0)}{log(alpha_0)}.
+
+
+ The Bradley Terry model can be fitted with covariates,
+ e.g., a home advantage variable, but unfortunately, this
+ lies outside the VGLM theoretical framework and therefore
+ cannot be handled with this code.
- The Bradley Terry model can be fitted with covariates, e.g., a home
- advantage variable, but unfortunately, this lies outside the VGLM
- theoretical framework and therefore cannot be handled with this code.
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
+
}
\references{
@@ -65,65 +92,74 @@ bratt(refgp = "last", refvalue = 1, init.alpha = 1, i0 = 0.01)
\emph{Proceedings in Computational Statistics COMPSTAT 2004},
Physica-Verlag: Heidelberg. Pages 513--526.
+
}
\author{ T. W. Yee }
\note{
- The function \code{\link{Brat}} is useful for coercing a \eqn{M}
- by \eqn{M} matrix of counts into a one-row matrix suitable for
- \code{bratt}. Diagonal elements are skipped, and the usual S
- order of \code{c(a.matrix)} of elements is used. There should be no
- missing values apart from the diagonal elements of the square matrix.
- The matrix should have winners as the rows, and losers as the columns.
- In general, the response should be a matrix with \eqn{M(M-1)} columns.
+ The function \code{\link{Brat}} is useful for coercing
+ a \eqn{M} by \eqn{M} matrix of counts into a one-row
+ matrix suitable for \code{bratt}. Diagonal elements
+ are skipped, and the usual S order of \code{c(a.matrix)}
+ of elements is used. There should be no missing values
+ apart from the diagonal elements of the square matrix.
+ The matrix should have winners as the rows, and losers
+ as the columns. In general, the response should be a
+ matrix with \eqn{M(M-1)} columns.
+
Also, a symmetric matrix of ties should be passed into
- \code{\link{Brat}}. The diagonal of this matrix should be all
- \code{NA}s.
+ \code{\link{Brat}}. The diagonal of this matrix should
+ be all \code{NA}s.
+
+
+ Only an intercept model is recommended with \code{bratt}.
+ It doesn't make sense really to include covariates because
+ of the limited VGLM framework.
- Only an intercept model is recommended with \code{bratt}. It doesn't
- make sense really to include covariates because of the limited VGLM
- framework.
Notationally, note that the \pkg{VGAM} family function
- \code{\link{brat}} has \eqn{M+1} contestants, while \code{bratt}
- has \eqn{M} contestants.
+ \code{\link{brat}} has \eqn{M+1} contestants, while
+ \code{bratt} has \eqn{M} contestants.
+
}
\seealso{
-\code{\link{brat}},
-\code{\link{Brat}},
-\code{\link{binomialff}}.
+ \code{\link{brat}},
+ \code{\link{Brat}},
+ \code{\link{binomialff}}.
+
+
}
\examples{
# citation statistics: being cited is a 'win'; citing is a 'loss'
journal = c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-m = matrix(c( NA, 33, 320, 284,
+mat = matrix(c( NA, 33, 320, 284,
730, NA, 813, 276,
498, 68, NA, 325,
221, 17, 142, NA), 4,4)
-dimnames(m) = list(winner = journal, loser = journal)
+dimnames(mat) = list(winner = journal, loser = journal)
# Add some ties. This is fictitional data.
-ties = 5 + 0*m
+ties = 5 + 0*mat
ties[2,1] = ties[1,2] = 9
# Now fit the model
-fit = vglm(Brat(m, ties) ~ 1, bratt(refgp=1), trace=TRUE)
-fit = vglm(Brat(m, ties) ~ 1, bratt(refgp=1), trace=TRUE, cri="c")
+fit = vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE)
+fit = vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE, crit = "coef")
summary(fit)
-c(0, coef(fit)) # log-abilities (in order of "journal"); last is log(alpha0)
-c(1, Coef(fit)) # abilities (in order of "journal"); last is alpha0
+c(0, coef(fit)) # Log-abilities (in order of "journal"); last is log(alpha0)
+c(1, Coef(fit)) # Abilities (in order of "journal"); last is alpha0
fit at misc$alpha # alpha_1,...,alpha_M
fit at misc$alpha0 # alpha_0
-fitted(fit) # probabilities of winning and tying, in awkward form
+fitted(fit) # Probabilities of winning and tying, in awkward form
predict(fit)
-(check = InverseBrat(fitted(fit))) # probabilities of winning
-qprob = attr(fitted(fit), "probtie") # probabilities of a tie
-qprobmat = InverseBrat(c(qprob), NCo=nrow(ties)) # probabilities of a tie
+(check = InverseBrat(fitted(fit))) # Probabilities of winning
+qprob = attr(fitted(fit), "probtie") # Probabilities of a tie
+qprobmat = InverseBrat(c(qprob), NCo=nrow(ties)) # Probabilities of a tie
check + t(check) + qprobmat # Should be 1's in the off-diagonals
}
\keyword{models}
diff --git a/man/calibrate.qrrvglm.Rd b/man/calibrate.qrrvglm.Rd
index 2299f13..e90bc84 100644
--- a/man/calibrate.qrrvglm.Rd
+++ b/man/calibrate.qrrvglm.Rd
@@ -10,7 +10,7 @@
}
\usage{
calibrate.qrrvglm(object, newdata = NULL,
- type=c("lv","predictors","response","vcov","all3or4"),
+ type = c("lv", "predictors", "response", "vcov", "all3or4"),
initial.vals = NULL, ...)
}
%- maybe also 'usage' for other objects documented here.
@@ -48,27 +48,35 @@ calibrate.qrrvglm(object, newdata = NULL,
The default is a grid defined by arguments in
\code{\link{calibrate.qrrvglm.control}}.
}
- \item{\dots}{ Arguments that are fed into
- \code{\link{calibrate.qrrvglm.control}}. }
+ \item{\dots}{
+ Arguments that are fed into
+ \code{\link{calibrate.qrrvglm.control}}.
+
+
+}
}
\details{
Given a fitted regression CQO/CAO model,
maximum likelihood calibration is theoretically easy and elegant.
However, the method assumes that all species are
independent, which is not really true in practice.
- More details and references are given in Yee (2005).
+ More details and references are given in Yee (2012).
+
The function \code{\link[stats]{optim}} is used to search for
the maximum likelihood solution. Good initial values are
needed, and \code{\link{calibrate.qrrvglm.control}}
allows the user some control over the choice of these.
+
+
}
\value{
The argument \code{type} determines what is returned.
- If \code{type="all3or4"} then all the \code{type} values are returned
+ If \code{type = "all3or4"} then all the \code{type} values are returned
in a list, with the following components.
Each component has length \code{nrow(newdata)}.
+
\item{lv}{Calibrated latent variables or site scores. }
\item{predictors }{linear/quadratic or additive predictors.
For example, for Poisson families, this will be on a log scale,
@@ -82,11 +90,13 @@ calibrate.qrrvglm(object, newdata = NULL,
}
}
\references{
-Yee, T. W. (2005)
+
+Yee, T. W. (2012)
On constrained and unconstrained
quadratic ordination.
\emph{Manuscript in preparation}.
+
ter Braak, C. J. F. 1995.
Calibration. In:
\emph{Data Analysis in Community and Landscape Ecology}
@@ -95,15 +105,20 @@ van Tongeren, O. F. R. (Eds.)
Cambridge University Press,
Cambridge.
+
}
\author{T. W. Yee}
\note{
Despite the name of this function, UQO and CAO models are handled
as well.
+
+
}
\section{Warning }{
This function is computationally expensive.
- Setting \code{trace=TRUE} to get a running log is a good idea.
+ Setting \code{trace = TRUE} to get a running log is a good idea.
+
+
}
\seealso{
@@ -112,6 +127,8 @@ Cambridge.
\code{\link{cqo}},
\code{\link{uqo}},
\code{\link{cao}}.
+
+
}
\examples{
\dontrun{
@@ -124,15 +141,15 @@ p1 = cqo(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
IToler = TRUE, Crow1positive = TRUE)
siteNos = 1:2 # Calibrate these sites
-cp1 = calibrate(p1, new=data.frame(p1 at y[siteNos,]), trace=TRUE)
+cp1 = calibrate(p1, new=data.frame(p1 at y[siteNos,]), trace = TRUE)
}
\dontrun{
# Graphically compare the actual site scores with their calibrated values
-persp(p1, main="Site scores: solid=actual, dashed=calibrated",
- label=TRUE, col="blue", las=1)
-abline(v=lv(p1)[siteNos], lty=1, col=1:length(siteNos)) # actual site scores
-abline(v=cp1, lty=2, col=1:length(siteNos)) # calibrated values
+persp(p1, main = "Site scores: solid=actual, dashed=calibrated",
+ label = TRUE, col = "blue", las = 1)
+abline(v = lv(p1)[siteNos], lty = 1, col = 1:length(siteNos)) # actual site scores
+abline(v = cp1, lty = 2, col = 1:length(siteNos)) # calibrated values
}
}
\keyword{models}
diff --git a/man/calibrate.qrrvglm.control.Rd b/man/calibrate.qrrvglm.control.Rd
index b99e281..ca41392 100644
--- a/man/calibrate.qrrvglm.control.Rd
+++ b/man/calibrate.qrrvglm.control.Rd
@@ -64,10 +64,12 @@ calibrate.qrrvglm.control(object, trace = FALSE, Method.optim = "BFGS",
}
\references{
-Yee, T. W. (2005)
+
+Yee, T. W. (2012)
On constrained and unconstrained quadratic ordination.
\emph{Manuscript in preparation}.
+
}
\author{T. W. Yee}
\note{
@@ -80,6 +82,7 @@ On constrained and unconstrained quadratic ordination.
\code{\link{calibrate.qrrvglm}},
\code{\link{Coef.qrrvglm}}.
+
}
\examples{
\dontrun{ hspider[,1:6] = scale(hspider[,1:6]) # Needed when ITol = TRUE
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
index 42de180..7c6dd7b 100644
--- a/man/cdf.lmscreg.Rd
+++ b/man/cdf.lmscreg.Rd
@@ -62,7 +62,7 @@ The CDF values of the model have been placed in
\code{\link{lms.yjn}}.
}
\examples{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
head(fit at post$cdf)
head(cdf(fit)) # Same
head(fit at y)
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index d00227b..59c9e32 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -32,6 +32,7 @@ cenpoisson(link = "loge", earg = list(), imu = NULL)
Only a univariate response is allowed.
The Newton-Raphson algorithm is used.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -80,16 +81,16 @@ cenpoisson(link = "loge", earg = list(), imu = NULL)
\examples{
# Example 1: right censored data
set.seed(123); U = 20
-cdata = data.frame(y = rpois(n <- 100, exp(3)))
+cdata = data.frame(y = rpois(N <- 100, exp(3)))
cdata = transform(cdata, cy = pmin(U, y),
rcensored = (y >= U))
cdata = transform(cdata, status = ifelse(rcensored, 0, 1))
with(cdata, table(cy))
with(cdata, table(rcensored))
-with(cdata, table(ii <- print(SurvS4(cy, status)))) # Check; U+ means >= U
+with(cdata, table(ii <- print(SurvS4(cy, status)))) # Check; U+ means >= U
fit = vglm(SurvS4(cy, status) ~ 1, cenpoisson, cdata, trace = TRUE)
coef(fit, matrix = TRUE)
-table(print(fit at y)) # Another check; U+ means >= U
+table(print(depvar(fit))) # Another check; U+ means >= U
# Example 2: left censored data
@@ -105,37 +106,38 @@ coef(fit, matrix = TRUE)
# Example 3: interval censored data
-cdata = transform(cdata, Lvec = rep(L, len = n),
- Uvec = rep(U, len = n))
-cdata = transform(cdata, icensored = Lvec <= y & y < Uvec) # Neither lcensored or rcensored
+cdata = transform(cdata, Lvec = rep(L, len = N),
+ Uvec = rep(U, len = N))
+cdata = transform(cdata, icensored = Lvec <= y & y < Uvec) # Not lcensored or rcensored
with(cdata, table(icensored))
-cdata = transform(cdata, status = rep(3, n)) # 3 means interval censored
+cdata = transform(cdata, status = rep(3, N)) # 3 means interval censored
cdata = transform(cdata, status = ifelse(rcensored, 0, status)) # 0 means right censored
cdata = transform(cdata, status = ifelse(lcensored, 2, status)) # 2 means left censored
# Have to adjust Lvec and Uvec because of the (start, end] format:
cdata$Lvec[with(cdata, icensored)] = cdata$Lvec[with(cdata, icensored)] - 1
cdata$Uvec[with(cdata, icensored)] = cdata$Uvec[with(cdata, icensored)] - 1
-cdata$Lvec[with(cdata, lcensored)] = cdata$Lvec[with(cdata, lcensored)] # Remains unchanged
-cdata$Lvec[with(cdata, rcensored)] = cdata$Uvec[with(cdata, rcensored)] # Remains unchanged
+cdata$Lvec[with(cdata, lcensored)] = cdata$Lvec[with(cdata, lcensored)] # Unchanged
+cdata$Lvec[with(cdata, rcensored)] = cdata$Uvec[with(cdata, rcensored)] # Unchanged
with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check
fit = vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
cenpoisson, cdata, trace = TRUE)
coef(fit, matrix = TRUE)
-table(print(fit at y)) # Another check
+table(print(depvar(fit))) # Another check
# Example 4: Add in some uncensored observations
-index = (1:n)[with(cdata, icensored)]
+index = (1:N)[with(cdata, icensored)]
index = head(index, 4)
cdata$status[index] = 1 # actual or uncensored value
cdata$Lvec[index] = cdata$y[index]
-with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status, type = "interval")))) # Check
+with(cdata, table(ii <- print(SurvS4(Lvec, Uvec, status,
+ type = "interval")))) # Check
fit = vglm(SurvS4(Lvec, Uvec, status, type = "interval") ~ 1,
cenpoisson, cdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
-table(print(fit at y)) # Another check
+table(print(depvar(fit))) # Another check
}
\keyword{models}
\keyword{regression}
diff --git a/man/chestnz.Rd b/man/chest.nz.Rd
similarity index 81%
rename from man/chestnz.Rd
rename to man/chest.nz.Rd
index 0bd9fe4..3f0604a 100644
--- a/man/chestnz.Rd
+++ b/man/chest.nz.Rd
@@ -1,11 +1,11 @@
-\name{chestnz}
-\alias{chestnz}
+\name{chest.nz}
+\alias{chest.nz}
\docType{data}
\title{ Chest Pain in NZ Adults Data}
\description{
Presence/absence of chest pain in 10186 New Zealand adults.
}
-\usage{data(chestnz)}
+\usage{data(chest.nz)}
\format{
A data frame with 73 rows and the following 5 variables.
\describe{
@@ -22,6 +22,7 @@
If yes, they indicated whether it
was on their LHS and/or RHS of their chest.
+
}
\source{
MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
@@ -30,14 +31,14 @@
Health Study: design and baseline findings.
\emph{New Zealand Medical Journal},
\bold{108}, 499--502.
+
+
}
\examples{
-fit = vgam(cbind(nolnor, nolr,lnor,lr) ~ s(age, c(4,3)),
- binom2.or(exchan=TRUE, zero=NULL), data = chestnz)
-coef(fit, matrix=TRUE)
-\dontrun{
-plot(fit, which.cf=2, se=TRUE)
-}
+fit = vgam(cbind(nolnor, nolr, lnor, lr) ~ s(age, c(4, 3)),
+ binom2.or(exchan = TRUE, zero = NULL), data = chest.nz)
+coef(fit, matrix = TRUE)
+\dontrun{ plot(fit, which.cf = 2, se = TRUE) }
}
\keyword{datasets}
diff --git a/man/nzc.Rd b/man/chinese.nz.Rd
similarity index 84%
rename from man/nzc.Rd
rename to man/chinese.nz.Rd
index 20f6724..c86a2a4 100644
--- a/man/nzc.Rd
+++ b/man/chinese.nz.Rd
@@ -1,5 +1,5 @@
-\name{nzc}
-\alias{nzc}
+\name{chinese.nz}
+\alias{chinese.nz}
\docType{data}
\title{ Chinese Population in New Zealand 1867--2001 Data}
\description{
@@ -7,7 +7,7 @@
along with the whole of the New Zealand population.
}
-\usage{data(nzc)}
+\usage{data(chinese.nz)}
\format{
A data frame with 26 observations on the following 4 variables.
\describe{
@@ -34,12 +34,12 @@
}
\examples{
-\dontrun{ plot(female/(male+female) ~ year, nzc, type = "b",
+\dontrun{ plot(female/(male+female) ~ year, chinese.nz, type = "b",
ylab = "Proportion", col = "blue", las = 1,
main = "Proportion of NZ Chinese that are female")
abline(h = 0.5, lty = "dashed")
-plot(100*(male+female)/nz ~ year, nzc, type = "b", ylab = "Percent",
+plot(100*(male+female)/nz ~ year, chinese.nz, type = "b", ylab = "Percent",
ylim = c(0, max(100*(male+female)/nz)), col = "blue", las = 1,
main = "Percent of NZers that are Chinese")
abline(h = 0, lty = "dashed") }
diff --git a/man/cqo.Rd b/man/cqo.Rd
index 85d11f3..fe168ba 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -74,7 +74,7 @@ cqo(formula, family, data = list(), weights = NULL, subset = NULL,
\item{etastart}{
starting values for the linear predictors.
It is a \eqn{M}-column matrix.
- If \eqn{M=1} then it may be a vector.
+ If \eqn{M = 1} then it may be a vector.
Currently, this argument probably should not be used.
}
@@ -273,7 +273,13 @@ Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
}
-\author{Thomas W. Yee}
+\author{
+Thomas W. Yee.
+Thanks to Alvin Sou for converting a lot of the
+original FORTRAN code into C.
+
+
+}
\note{
@@ -444,14 +450,14 @@ p1ut = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
fam = poissonff, data = hspider, Crow1positive = FALSE,
EqualTol = FALSE)
sort(p1ut at misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1ut) > 1177) stop("suboptimal fit obtained")
+if(deviance(p1ut) > 1177) warning("suboptimal fit obtained")
\dontrun{
S = ncol(p1ut at y) # Number of species
clr = (1:(S+1))[-7] # Omits yellow
-lvplot(p1ut, y = TRUE, lcol=clr, pch=1:S, pcol=clr, las=1) # ordination diagram
-legend("topright", leg=colnames(p1ut at y), col=clr,
- pch=1:S, merge = TRUE, bty="n", lty=1:S, lwd=2) }
+lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1) # ordination diagram
+legend("topright", leg = colnames(p1ut at y), col = clr,
+ pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2) }
(cp = Coef(p1ut))
(a = cp at lv[cp at lvOrder]) # The ordered site scores along the gradient
@@ -462,15 +468,15 @@ a = a[!is.na(a)] # Delete the species that is not unimodal
names(a) # Names of the ordered optima along the gradient
\dontrun{
-trplot(p1ut, whichSpecies=1:3, log="xy", type="b", lty=1, lwd=2,
- col=c("blue","red","green"), label = TRUE) -> ii # trajectory plot
-legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep=" and "),
- lwd=2, lty=1, col=c("blue","red","green"))
-abline(a=0, b=1, lty="dashed")
+trplot(p1ut, whichSpecies = 1:3, log = "xy", type = "b", lty = 1, lwd = 2,
+ col = c("blue","red","green"), label = TRUE) -> ii # trajectory plot
+legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep = " and "),
+ lwd = 2, lty = 1, col = c("blue","red","green"))
+abline(a = 0, b = 1, lty = "dashed")
S = ncol(p1ut at y) # Number of species
clr = (1:(S+1))[-7] # Omits yellow
-persp(p1ut, col=clr, label = TRUE, las=1) # perspective plot
+persp(p1ut, col = clr, label = TRUE, las = 1) # perspective plot
}
@@ -482,11 +488,11 @@ p1et = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
fam = poissonff, data = hspider, Crow1positive = FALSE)
sort(p1et at misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1et) > 1586) stop("suboptimal fit obtained")
+if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
\dontrun{
S = ncol(p1et at y) # Number of species
clr = (1:(S+1))[-7] # Omits yellow
-persp(p1et, col=clr, label = TRUE, las=1) }
+persp(p1et, col = clr, label = TRUE, las = 1) }
# Example 3: A rank-2 equal tolerances CQO model with Poisson data
@@ -497,29 +503,29 @@ p2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
fam = poissonff, data = hspider, Crow1positive = FALSE,
- IToler = TRUE, Rank = 2, Bestof = 1, isdlv = c(2.1, 0.9))
+ IToler = TRUE, Rank = 2, Bestof = 3, isdlv = c(2.1, 0.9))
sort(p2 at misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p2) > 1127) stop("suboptimal fit obtained")
+if(deviance(p2) > 1127) warning("suboptimal fit obtained")
\dontrun{
-lvplot(p2, ellips = FALSE, label = TRUE, xlim=c(-3,4),
- C = TRUE, Ccol="brown", sites = TRUE, scol="grey",
- pcol="blue", pch="+", chull = TRUE, ccol="grey") }
+lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
+ C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey",
+ pcol = "blue", pch = "+", chull = TRUE, ccol = "grey") }
# Example 4: species packing model with presence/absence data
set.seed(2345)
n = 200; p = 5; S = 5
-mydata = rcqo(n, p, S, fam="binomial", hiabundance=4,
+mydata = rcqo(n, p, S, fam = "binomial", hiabundance = 4,
EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE)
myform = attr(mydata, "formula")
set.seed(1234)
-b1et = cqo(myform, binomialff(mv = TRUE, link="cloglog"), data = mydata)
+b1et = cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
sort(b1et at misc$deviance.Bestof) # A history of all the iterations
-\dontrun{ lvplot(b1et, y = TRUE, lcol=1:S, pch=1:S, pcol=1:S, las=1) }
+\dontrun{ lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1) }
Coef(b1et)
# Compare the fitted model with the 'truth'
-cbind(truth=attr(mydata, "ccoefficients"), fitted=ccoef(b1et))
+cbind(truth=attr(mydata, "ccoefficients"), fitted = ccoef(b1et))
# Example 5: Plot the deviance residuals for diagnostic purposes
@@ -528,18 +534,18 @@ p1et = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam=poissonff, data=hspider, EqualTol = TRUE, trace = FALSE)
+ fam = poissonff, data = hspider, EqualTol = TRUE, trace = FALSE)
sort(p1et at misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1et) > 1586) stop("suboptimal fit obtained")
+if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
S = ncol(p1et at y)
-par(mfrow=c(3,4))
+par(mfrow = c(3,4))
for(ii in 1:S) {
tempdata = data.frame(lv1 = c(lv(p1et)), sppCounts = p1et at y[,ii])
tempdata = transform(tempdata, myOffset = -0.5 * lv1^2)
# For species ii, refit the model to get the deviance residuals
- fit1 = vglm(sppCounts ~ offset(myOffset) + lv1, fam=poissonff,
- data=tempdata, trace = FALSE)
+ fit1 = vglm(sppCounts ~ offset(myOffset) + lv1, fam = poissonff,
+ data = tempdata, trace = FALSE)
# For checking: this should be 0
print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
@@ -550,13 +556,13 @@ for(ii in 1:S) {
predvalues = predict(fit1) + fit1 at offset
ooo = with(tempdata, order(lv1))
\dontrun{
- with(tempdata, plot(lv1, predvalues + devresid, col="darkgreen",
- xlab="lv1", ylab="", main=colnames(p1et at y)[ii]))
- with(tempdata, lines(lv1[ooo], predvalues[ooo], col="blue")) }
+ with(tempdata, plot(lv1, predvalues + devresid, col = "darkgreen",
+ xlab = "lv1", ylab = "", main = colnames(p1et at y)[ii]))
+ with(tempdata, lines(lv1[ooo], predvalues[ooo], col = "blue")) }
}
}
\keyword{models}
\keyword{regression}
-%legend("topright", x=1, y=135, leg=colnames(p1ut at y), col=clr,
-% pch=1:S, merge = TRUE, bty="n", lty=1:S, lwd=2)
+%legend("topright", x=1, y=135, leg = colnames(p1ut at y), col = clr,
+% pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
diff --git a/man/uscrime.Rd b/man/crime.us.Rd
similarity index 91%
rename from man/uscrime.Rd
rename to man/crime.us.Rd
index 8e7a2f3..c00972d 100644
--- a/man/uscrime.Rd
+++ b/man/crime.us.Rd
@@ -1,5 +1,5 @@
-\name{uscrime}
-\alias{uscrime}
+\name{crime.us}
+\alias{crime.us}
\docType{data}
\title{
Estimated Crime in 2009 in USA
@@ -9,7 +9,7 @@ Estimated Crime in 2009 in USA
Crime totals and rates, cross-classified by US state, during 2009.
}
-\usage{data(uscrime)}
+\usage{data(crime.us)}
\format{
A data frame with 50 observations on the following 22 variables.
\describe{
@@ -69,13 +69,13 @@ Crime totals and rates, cross-classified by US state, during 2009.
%%}
\examples{
\dontrun{ # Louisiana is the one outlier
-plot(MurderRate ~ stateNumber, uscrime,
+plot(MurderRate ~ stateNumber, crime.us,
axes = FALSE, type = "h", col = 1:6,
main = "USA murder rates in 2009 (per 100,000 population)")
-axis(1, with(uscrime, abbrev), at = with(uscrime, stateNumber),
+axis(1, with(crime.us, abbrev), at = with(crime.us, stateNumber),
col = 1:6, col.tick = 1:6, cex.lab = 0.5)
axis(2) }
-tail(uscrime[ sort.list(with(uscrime, MurderRate)), ])
+tail(crime.us[ sort.list(with(crime.us, MurderRate)), ])
}
\keyword{datasets}
-% data(uscrime)
+% data(crime.us)
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index f96434d..be37157 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -14,7 +14,7 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
}
%scumulative(link="logit", earg = list(),
% lscale="loge", escale = list(),
-% parallel=FALSE, sparallel=TRUE, reverse=FALSE, iscale = 1)
+% parallel = FALSE, sparallel = TRUE, reverse = FALSE, iscale = 1)
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -36,12 +36,14 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
List. Extra argument for the link function.
See \code{earg} in \code{\link{Links}} for general information.
+
}
\item{parallel}{
A logical or formula specifying which terms have
equal/unequal coefficients.
See below for more information about the parallelism assumption.
+
}
% \item{sparallel}{
% For the scaling parameters.
@@ -70,6 +72,7 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
if \code{reverse = FALSE} for then the cutpoints must be an
decreasing sequence.
+
}
\item{mv}{
Logical.
@@ -79,6 +82,7 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
Each column of the matrix is a response, i.e., multivariate response.
A suitable matrix can be obtained from \code{Cut}.
+
}
\item{intercept.apply}{
Logical.
@@ -88,6 +92,7 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
\code{\link{polf}},
\code{\link{nbolf}}.
+
}
% \item{iscale}{
% Numeric. Initial values for the scale parameters.
@@ -127,20 +132,24 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
Currently, reduced-rank vector generalized additive models
(RR-VGAMs) have not been implemented here.
+
% The scaled version of \code{cumulative()}, called \code{scumulative()},
% has \eqn{J} positive scaling factors.
% They are described in pages 154 and 177 of McCullagh and Nelder (1989);
% see their equation (5.4) in particular,
% which they call the \emph{generalized rational model}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Agresti, A. (2002)
\emph{Categorical Data Analysis},
2nd ed. New York: Wiley.
@@ -207,7 +216,7 @@ by the \pkg{VGAM} package can be found at
then numerical problems are less likely to occur during the fitting,
and there are less parameters. Numerical problems occur when
the linear/additive predictors cross, which results in probabilities
- outside of \eqn{(0,1)}; setting \code{parallel=TRUE} will help avoid
+ outside of \eqn{(0,1)}; setting \code{parallel = TRUE} will help avoid
this problem.
@@ -232,11 +241,13 @@ by the \pkg{VGAM} package can be found at
To fit the proportional odds model one can use the
\pkg{VGAM} family function \code{\link{propodds}}.
Note that \code{propodds(reverse)} is equivalent to
- \code{cumulative(parallel=TRUE, reverse=reverse)} (which is equivalent to
- \code{cumulative(parallel=TRUE, reverse=reverse, link="logit")}).
- It is for convenience only. A call to \code{cumulative()} is preferred
- since it reminds the user that a parallelism assumption is made, as
- well as being a lot more flexible.
+ \code{cumulative(parallel = TRUE, reverse = reverse)} (which is
+ equivalent to
+ \code{cumulative(parallel = TRUE, reverse = reverse, link = "logit")}).
+ It is for convenience only. A call to
+ \code{cumulative()} is preferred since it reminds the user
+ that a parallelism assumption is made, as well as being a lot
+ more flexible.
% In the future, this family function may be renamed to
@@ -250,6 +261,7 @@ by the \pkg{VGAM} package can be found at
No check is made to verify that the response is ordinal;
see \code{\link[base:factor]{ordered}}.
+
}
\seealso{
@@ -290,30 +302,31 @@ fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
# Check the proportional odds assumption with a LRT ----------
(fit3 = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel=FALSE, reverse=TRUE), pneumo))
-pchisq(2*(logLik(fit3)-logLik(fit)),
- df = length(coef(fit3))-length(coef(fit)), lower.tail = FALSE)
+ cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+pchisq(2 * (logLik(fit3) - logLik(fit)),
+ df = length(coef(fit3)) - length(coef(fit)), lower.tail = FALSE)
+lrtest(fit3, fit) # More elegant
# A factor() version of fit ----------------------------------
# This is in long format (cf. wide format above)
-nobs = round(fit at y * c(weights(fit, type = "prior")))
-sumnobs = colSums(nobs) # apply(nobs, 2, sum)
+Nobs = round(depvar(fit) * c(weights(fit, type = "prior")))
+sumNobs = colSums(Nobs) # apply(Nobs, 2, sum)
-pneumo.long = data.frame(symptoms = ordered(rep(rep(colnames(nobs),
- nrow(nobs)),
- times = c(t(nobs))),
- levels = colnames(nobs)),
- let = rep(rep(with(pneumo, let), each = ncol(nobs)),
- times = c(t(nobs))))
-with(pneumo.long, table(let, symptoms)) # check it; should be same as pneumo
+pneumo.long =
+ data.frame(symptoms = ordered(rep(rep(colnames(Nobs), nrow(Nobs)),
+ times = c(t(Nobs))),
+ levels = colnames(Nobs)),
+ let = rep(rep(with(pneumo, let), each = ncol(Nobs)),
+ times = c(t(Nobs))))
+with(pneumo.long, table(let, symptoms)) # Check it; should be same as pneumo
(fit.long1 = vglm(symptoms ~ let, data = pneumo.long,
- cumulative(parallel=TRUE, reverse=TRUE), trace=TRUE))
-coef(fit.long1, matrix=TRUE) # Should be same as coef(fit, matrix=TRUE)
+ cumulative(parallel = TRUE, reverse = TRUE), trace = TRUE))
+coef(fit.long1, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
# Could try using mustart if fit.long1 failed to converge.
-mymustart = matrix(sumnobs / sum(sumnobs),
- nrow(pneumo.long), ncol(nobs), byrow = TRUE)
+mymustart = matrix(sumNobs / sum(sumNobs),
+ nrow(pneumo.long), ncol(Nobs), byrow = TRUE)
fit.long2 = vglm(symptoms ~ let,
fam = cumulative(parallel = TRUE, reverse = TRUE),
mustart = mymustart, data = pneumo.long, trace = TRUE)
diff --git a/man/dagum.Rd b/man/dagum.Rd
index dad463c..fc6ef4a 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -7,24 +7,24 @@
Dagum distribution.
}
\usage{
-dagum(link.a = "loge", link.scale = "loge", link.p = "loge",
- earg.a = list(), earg.scale = list(), earg.p = list(),
- init.a = NULL, init.scale = NULL, init.p = 1, zero = NULL)
+dagum(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
+ eshape1.a = list(), escale = list(), eshape2.p = list(),
+ ishape1.a = NULL, iscale = NULL, ishape2.p = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale, link.p}{
+ \item{lshape1.a, lscale, lshape2.p}{
Parameter link functions applied to the
(positive) parameters \code{a}, \code{scale}, and \code{p}.
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale, earg.p}{
+ \item{eshape1.a, escale, eshape2.p}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale, init.p}{
+ \item{ishape1.a, iscale, ishape2.p}{
Optional initial values for \code{a}, \code{scale}, and \code{p}.
}
@@ -60,7 +60,7 @@ and the others are shape parameters.
The mean is
\deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(1 - 1/a) / \Gamma(p)}{%
E(Y) = b gamma(p + 1/a) gamma(1 - 1/a) / gamma(p)}
-provided \eqn{-ap < 1 < a}.
+provided \eqn{-ap < 1 < a}; these are returned as the fitted values.
}
@@ -75,7 +75,7 @@ provided \eqn{-ap < 1 < a}.
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
@@ -112,7 +112,7 @@ while estimates for \eqn{a} and \eqn{p} can be considered unbiased for
\examples{
ddata = data.frame(y = rdagum(n = 3000, 4, 6, 2))
fit = vglm(y ~ 1, dagum, ddata, trace = TRUE)
-fit = vglm(y ~ 1, dagum(init.a = 2.1), ddata, trace = TRUE, crit = "c")
+fit = vglm(y ~ 1, dagum(ishape1.a = 2.1), ddata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/DagumUC.Rd b/man/dagumUC.Rd
similarity index 73%
rename from man/DagumUC.Rd
rename to man/dagumUC.Rd
index 5b19c12..ce5aa57 100644
--- a/man/DagumUC.Rd
+++ b/man/dagumUC.Rd
@@ -9,23 +9,24 @@
Density, distribution function, quantile function and random
generation for the Dagum distribution with shape parameters \code{a}
and \code{p}, and scale parameter \code{scale}.
+
}
\usage{
-ddagum(x, a, scale, p.arg, log=FALSE)
-pdagum(q, a, scale, p.arg)
-qdagum(p, a, scale, p.arg)
-rdagum(n, a, scale, p.arg)
+ddagum(x, shape1.a, scale, shape2.p, log = FALSE)
+pdagum(q, shape1.a, scale, shape2.p)
+qdagum(p, shape1.a, scale, shape2.p)
+rdagum(n, shape1.a, scale, shape2.p)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{a, p.arg}{shape parameters.}
+ \item{shape1.a, shape2.p}{shape parameters.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -35,30 +36,36 @@ rdagum(n, a, scale, p.arg)
\code{pdagum} gives the distribution function,
\code{qdagum} gives the quantile function, and
\code{rdagum} generates random deviates.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{dagum}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
}
\note{
The Dagum distribution is a special case of the 4-parameter
generalized beta II distribution.
+
}
\seealso{
\code{\link{dagum}},
\code{\link{genbetaII}}.
+
}
\examples{
-y = rdagum(n=3000, 4, 6, 2)
-fit = vglm(y ~ 1, dagum(init.a=2.1), trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+ddata = data.frame(y = rdagum(n = 3000, 4, 6, 2))
+fit = vglm(y ~ 1, dagum(ishape1.a = 2.1), ddata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/deplot.lmscreg.Rd b/man/deplot.lmscreg.Rd
index 6fad0c3..eb3ed12 100644
--- a/man/deplot.lmscreg.Rd
+++ b/man/deplot.lmscreg.Rd
@@ -69,7 +69,7 @@ contains further information and examples.
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
ygrid = seq(15, 43, by=0.25)
deplot(fit, x0=20, y=ygrid, xlab="BMI", col="green", llwd=2,
main="BMI distribution at ages 20 (green), 40 (blue), 60 (red)")
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index 5f39766..ff543c9 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -102,13 +102,14 @@ very close to 0 or 1.
}
\references{
+
Jones, M. C. (1994)
Expectiles and M-quantiles are quantiles.
\emph{Statistics and Probability Letters},
\bold{20}, 149--153.
-Yee, T. W. (2010)
+Yee, T. W. (2012)
Vector generalized linear and additive
quantile and expectile regression.
\emph{In preparation}.
@@ -165,11 +166,11 @@ par(mfrow=c(2,1))
yy = seq(0.0, 1.0, len=nn)
plot(yy, deunif(yy), type="l", col="blue", ylim = c(0, 2),
xlab = "y", ylab = "g(y)", main = "g(y) for Uniform(0,1)")
-lines(yy, dunif(yy), col="darkgreen", lty="dotted", lwd=2) # 'original'
+lines(yy, dunif(yy), col="darkgreen", lty="dotted", lwd = 2) # 'original'
plot(yy, peunif(yy), type="l", col="blue", ylim = 0:1,
xlab = "y", ylab = "G(y)", main = "G(y) for Uniform(0,1)")
-abline(a=0.0, b=1.0, col="darkgreen", lty="dotted", lwd=2)
+abline(a=0.0, b=1.0, col="darkgreen", lty="dotted", lwd = 2)
abline(v=0.5, h=0.5, col="red", lty="dashed") }
}
\keyword{distribution}
diff --git a/man/exppoisson.Rd b/man/exppoisson.Rd
index 178f1aa..a38653a 100644
--- a/man/exppoisson.Rd
+++ b/man/exppoisson.Rd
@@ -48,8 +48,13 @@ exppoisson(llambda = "loge", lbetave = "loge", elambda = list(),
and scale, \eqn{\beta}{b}, are positive.
The distribution implies a population facing discrete
hazard rates which are multiples of a base hazard.
- This \pkg{VGAM} family function requires the \pkg{hypergeo} package
- (to use their \code{\link[hypergeo]{genhypergeo}} function).
+ This \pkg{VGAM} family function requires the \code{hypergeo} package
+ (to use their \code{genhypergeo} function).
+
+
+% This \pkg{VGAM} family function requires the \pkg{hypergeo} package
+% (to use their \code{\link[hypergeo]{genhypergeo}} function).
+
}
diff --git a/man/fgm.Rd b/man/fgm.Rd
index 97d3bc9..4d6f887 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -64,13 +64,14 @@ fgm(lapar="rhobit", earg=list(), iapar=NULL, imethod=1, nsimEIM=200)
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
@@ -91,10 +92,10 @@ Hoboken, N.J.: Wiley-Interscience.
}
\examples{
-ymat = rfgm(n = 1000, alpha=rhobit(3, inverse=TRUE))
-\dontrun{plot(ymat, col="blue")}
-fit = vglm(ymat ~ 1, fam=fgm, trace=TRUE)
-coef(fit, matrix=TRUE)
+ymat = rfgm(n = 1000, alpha = rhobit(3, inverse = TRUE))
+\dontrun{plot(ymat, col = "blue")}
+fit = vglm(ymat ~ 1, fam = fgm, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
}
diff --git a/man/fgmUC.Rd b/man/fgmUC.Rd
index c3827d7..7a7ee66 100644
--- a/man/fgmUC.Rd
+++ b/man/fgmUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dfgm(x1, x2, alpha, log=FALSE)
+dfgm(x1, x2, alpha, log = FALSE)
pfgm(q1, q2, alpha)
rfgm(n, alpha)
}
@@ -30,6 +30,7 @@ rfgm(n, alpha)
\code{dfgm} gives the density,
\code{pfgm} gives the distribution function, and
\code{rfgm} generates random deviates (a two-column matrix).
+
}
%\references{
%
@@ -41,6 +42,7 @@ rfgm(n, alpha)
parameter by maximum likelihood estimation, for the formula of the
cumulative distribution function and other details.
+
}
%\note{
%}
@@ -50,19 +52,17 @@ rfgm(n, alpha)
}
\examples{
\dontrun{
-N = 101
-x = seq(0.0, 1.0, len=N)
-alpha = 0.7
+N = 101; x = seq(0.0, 1.0, len = N); alpha = 0.7
ox = expand.grid(x, x)
z = dfgm(ox[,1], ox[,2], alpha=alpha)
contour(x, x, matrix(z, N, N), col="blue")
z = pfgm(ox[,1], ox[,2], alpha=alpha)
contour(x, x, matrix(z, N, N), col="blue")
-plot(r <- rfgm(n=3000, alpha=alpha), col="blue")
-par(mfrow=c(1,2))
-hist(r[,1]) # Should be uniform
-hist(r[,2]) # Should be uniform
+plot(r <- rfgm(n = 3000, alpha = alpha), col = "blue")
+par(mfrow = c(1, 2))
+hist(r[, 1]) # Should be uniform
+hist(r[, 2]) # Should be uniform
}
}
\keyword{distribution}
diff --git a/man/fisk.Rd b/man/fisk.Rd
index 43ab98d..7b2ddc6 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -8,23 +8,23 @@
}
\usage{
-fisk(link.a = "loge", link.scale = "loge", earg.a=list(),
- earg.scale=list(), init.a = NULL, init.scale = NULL, zero = NULL)
+fisk(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
+ escale = list(), ishape1.a = NULL, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale}{
+ \item{lshape1.a, lscale}{
Parameter link functions applied to the
(positive) parameters \code{a} and \code{scale}.
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale}{
+ \item{eshape1.a, escale}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale}{
+ \item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
}
@@ -57,7 +57,7 @@ The cumulative distribution function is
The mean is
\deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(1 - 1/a)}{%
E(Y) = b gamma(1 + 1/a) gamma(1 - 1/a)}
-provided \eqn{a > 1}.
+provided \eqn{a > 1}; these are returned as the fitted values.
}
@@ -97,10 +97,10 @@ Hoboken, NJ: Wiley-Interscience.
}
\examples{
-y = rfisk(n=200, 4, 6)
-fit = vglm(y ~ 1, fisk, trace=TRUE)
-fit = vglm(y ~ 1, fisk(init.a=3.3), trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+fdata = data.frame(y = rfisk(n = 200, 4, 6))
+fit = vglm(y ~ 1, fisk, fdata, trace = TRUE)
+fit = vglm(y ~ 1, fisk(ishape1.a = 3.3), fdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/FiskUC.Rd b/man/fiskUC.Rd
similarity index 74%
rename from man/FiskUC.Rd
rename to man/fiskUC.Rd
index 429507a..3d8f4cc 100644
--- a/man/FiskUC.Rd
+++ b/man/fiskUC.Rd
@@ -11,21 +11,21 @@
and scale parameter \code{scale}.
}
\usage{
-dfisk(x, a, scale=1, log=FALSE)
-pfisk(q, a, scale=1)
-qfisk(p, a, scale=1)
-rfisk(n, a, scale=1)
+dfisk(x, shape1.a, scale = 1, log = FALSE)
+pfisk(q, shape1.a, scale = 1)
+qfisk(p, shape1.a, scale = 1)
+rfisk(n, shape1.a, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations.
If \code{length(n) > 1} then the length is taken to be the number required.}
- \item{a}{shape parameter.}
+ \item{shape1.a}{shape parameter.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -35,30 +35,39 @@ rfisk(n, a, scale=1)
\code{pfisk} gives the distribution function,
\code{qfisk} gives the quantile function, and
\code{rfisk} generates random deviates.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
+
}
\author{ T. W. Yee }
\details{
See \code{\link{fisk}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
+
}
\note{
The Fisk distribution is a special case of the 4-parameter
generalized beta II distribution.
+
+
}
\seealso{
\code{\link{fisk}},
\code{\link{genbetaII}}.
+
}
\examples{
-y = rfisk(n=1000, 4, 6)
-fit = vglm(y ~ 1, fisk, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+fdata = data.frame(y = rfisk(n = 1000, 4, 6))
+fit = vglm(y ~ 1, fisk, data = fdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 97ccf7b..7679474 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -76,10 +76,10 @@ fitted(fit)
# LMS quantile regression example 2
fit = vgam(BMI ~ s(age, df = c(4,2)),
- fam = lms.bcn(zero = 1), data = bminz, trace = TRUE)
+ fam = lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
head(predict(fit, type = "r")) # The following three are equal
head(fitted(fit))
-predict(fit, type = "r", newdata = head(bminz))
+predict(fit, type = "r", newdata = head(bmi.nz))
}
\keyword{models}
\keyword{regression}
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 10061b2..6a3d886 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -112,7 +112,7 @@ frechet2(location = 0, lscale = "loge", lshape = "logoff",
Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications
in Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
diff --git a/man/frechetUC.Rd b/man/frechetUC.Rd
index 5b55561..cb6ce5b 100644
--- a/man/frechetUC.Rd
+++ b/man/frechetUC.Rd
@@ -43,7 +43,7 @@ rfrechet(n, location = 0, scale = 1, shape)
Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
diff --git a/man/garma.Rd b/man/garma.Rd
index 888ce10..6c68f30 100644
--- a/man/garma.Rd
+++ b/man/garma.Rd
@@ -4,9 +4,10 @@
\title{GARMA (Generalized Autoregressive Moving-Average) Models}
\description{
Fits GARMA models to time series data.
+
}
\usage{
-garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
+garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
coefstart = NULL, step = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,6 +20,7 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
\code{\link{cloglog}},
\code{\link{cauchit}} are suitable for binary responses.
+
}
\item{earg}{
List. Extra argument for the link.
@@ -31,28 +33,33 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
See \code{\link{loge}} and \code{\link{logit}} etc. for specific
information about each link function.
+
}
-\item{p.ar.lag}{
+ \item{p.ar.lag}{
A positive integer,
the lag for the autoregressive component.
Called \eqn{p} below.
+
}
-\item{q.lag.ma}{
+ \item{q.ma.lag}{
A non-negative integer,
the lag for the moving-average component.
Called \eqn{q} below.
+
}
\item{coefstart}{
Starting values for the coefficients.
For technical reasons, the
argument \code{coefstart} in \code{\link{vglm}} cannot be used.
+
}
\item{step}{
Numeric. Step length, e.g., \code{0.5} means half-stepsizing.
+
}
% \item{constant}{
% Used when the log or logit link is chosen.
@@ -72,7 +79,8 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
given in the \code{link} argument reflect this, and the user
must choose an appropriate link.
- The GARMA(\eqn{p,q}) model is defined by firstly
+
+ The GARMA(\eqn{p, q}) model is defined by firstly
having a response belonging to the exponential family
\deqn{f(y_t|D_t) = \exp
\left\{ \frac{y_t \theta_t - b(\theta_t)}{\phi / A_t} +
@@ -95,7 +103,7 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
\eqn{D_t=\{x_t,\ldots,x_1,y_{t-1},\ldots,y_1,\mu_{t-1},\ldots,\mu_1\}}{
D_t={x_t,\ldots,x_1,y_(t-1),\ldots,y_1,mu_(t-1),\ldots,mu_1}}
is the previous information set.
- Secondly, the GARMA(\eqn{p,q}) model is defined by
+ Secondly, the GARMA(\eqn{p, q}) model is defined by
\deqn{g(\mu_t) = \eta_t = x_t^T \beta +
\sum_{k=1}^p \phi_k (g(y_{t-k}) - x_{t-k}^T \beta) +
\sum_{k=1}^q \theta_k (g(y_{t-k}) - \eta_{t-k}).}{%
@@ -105,11 +113,13 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
Parameter vectors \eqn{\beta}{beta}, \eqn{\phi}{phi} and \eqn{\theta}{theta}
are estimated by maximum likelihood.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
}
\references{
Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (1998)
@@ -117,36 +127,46 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
\emph{Proceedings in Computational Statistics COMPSTAT 1998} by
Payne, R. and P. J. Green. Physica-Verlag.
+
Benjamin, M. A., Rigby, R. A. and Stasinopoulos, M. D. (2003)
Generalized Autoregressive Moving Average Models.
\emph{Journal of the American Statistical Association},
\bold{98}: 214--223.
+
Zeger, S. L. and Qaqish, B. (1988)
Markov regression models for time series: a quasi-likelihood approach.
\emph{Biometrics},
\bold{44}: 1019--1031.
+
}
\author{ T. W. Yee }
\note{
This function is unpolished and requires lots
of improvements. In particular, initialization is quite poor,
- and could be improved.
+ and ought to be improved.
A limited amount of experience has shown that half-stepsizing is
- often needed for convergence, therefore choosing \code{crit="coef"}
+ often needed for convergence, therefore choosing \code{crit = "coef"}
is not recommended.
+
Overdispersion is not handled.
+ For binomial responses it is currently best to input a vector
+ of 1s and 0s rather than the \code{cbind(successes, failures)}
+ because the initialize slot is rudimentary.
+
+
}
\section{Warning}{
- This \pkg{VGAM} family function is `non-standard' in that the model does need
- some coercing to get it into the VGLM framework.
- Special code is required to get it running.
- A consequence is that some methods functions may give wrong results
- when applied to the fitted object.
+ This \pkg{VGAM} family function is 'non-standard' in that the
+ model does need some coercing to get it into the VGLM framework.
+ Special code is required to get it running. A consequence is
+ that some methods functions may give wrong results when applied
+ to the fitted object.
+
}
\seealso{
@@ -156,11 +176,11 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.lag.ma = 0,
The site \url{http://www.stat.auckland.ac.nz/~yee} contains
more documentation about this family function.
+
}
\examples{
-# See Zeger and Qaqish (1988)
-interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
+gdata = data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
59, 73, 6, 44, 72, 66, 59, 60, 39, 52,
50, 29, 30, 56, 76, 55, 73, 104, 104, 52,
25, 33, 20, 60, 47, 6, 47, 22, 35, 30,
@@ -169,20 +189,19 @@ interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
2, 30, 18, 17, 28, 9, 28, 20, 17, 12,
19, 18, 14, 23, 18, 22, 18, 19, 26, 27,
23, 24, 35, 22, 29, 28, 17, 30, 34, 17,
-20, 49, 29, 35, 49, 25, 55, 42, 29, 16)
-spikenum = seq(interspike)
+20, 49, 29, 35, 49, 25, 55, 42, 29, 16)) # See Zeger and Qaqish (1988)
+gdata = transform(gdata, spikenum = seq(interspike))
bvalue = 0.1 # .Machine$double.xmin # Boundary value
-fit = vglm(interspike ~ 1, trace=TRUE,
- garma("loge", earg=list(bvalue=bvalue), p=2, coef=c(4,.3,.4)))
+fit = vglm(interspike ~ 1, trace = TRUE, data = gdata,
+ garma("loge", earg = list(bvalue = bvalue),
+ p = 2, coef = c(4, 0.3, 0.4)))
summary(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit) # A bug here
-\dontrun{
-plot(interspike, ylim=c(0,120), las=1, font=1, xlab="Spike Number",
- ylab="Inter-Spike Time (ms)", col="blue")
-lines(spikenum[-(1:fit at misc$plag)], fitted(fit), col="green")
-abline(h=mean(interspike), lty=2)
-}
+\dontrun{ with(gdata, plot(interspike, ylim = c(0, 120), las = 1,
+ xlab = "Spike Number", ylab = "Inter-Spike Time (ms)", col = "blue"))
+with(gdata, lines(spikenum[-(1:fit at misc$plag)], fitted(fit), col = "orange"))
+abline(h = mean(with(gdata, interspike)), lty = "dashed", col = "gray") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index c7c141d..58a6189 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -7,15 +7,14 @@
generalized beta II distribution.
}
\usage{
-genbetaII(link.a = "loge", link.scale = "loge",
- link.p = "loge", link.q = "loge",
- earg.a=list(), earg.scale=list(), earg.p=list(), earg.q=list(),
- init.a = NULL, init.scale = NULL, init.p = 1, init.q = 1,
+genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+ eshape1.a = list(), escale = list(), eshape2.p = list(), eshape3.q = list(),
+ ishape1.a = NULL, iscale = NULL, ishape2.p = 1, ishape3.q = 1,
zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale, link.p, link.q}{
+ \item{lshape1.a, lscale, lshape2.p, lshape3.q}{
Parameter link functions applied to the
shape parameter \code{a},
scale parameter \code{scale},
@@ -25,17 +24,17 @@ genbetaII(link.a = "loge", link.scale = "loge",
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale, earg.p, earg.q}{
+ \item{eshape1.a, escale, eshape2.p, eshape3.q}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale}{
+ \item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
A \code{NULL} means a value is computed internally.
}
- \item{init.p, init.q}{
+ \item{ishape2.p, ishape3.q}{
Optional initial values for \code{p} and \code{q}.
}
@@ -55,6 +54,9 @@ genbetaII(link.a = "loge", link.scale = "loge",
distributions are all special cases.
Full details can be found in Kleiber and Kotz (2003), and
Brazauskas (2002).
+ The argument names given here are used by other families that
+ are special cases of this family.
+
The 4-parameter generalized beta II distribution has density
\deqn{f(y) = a y^{ap-1} / [b^{ap} B(p,q) \{1 + (y/b)^a\}^{p+q}]}{%
@@ -66,29 +68,36 @@ while the others are shape parameters.
The mean is
\deqn{E(Y) = b \, \Gamma(p + 1/a) \, \Gamma(q - 1/a) / (\Gamma(p) \, \Gamma(q))}{%
E(Y) = b gamma(p + 1/a) gamma(q - 1/a) / ( gamma(p) gamma(q))}
-provided \eqn{-ap < 1 < aq}.
+provided \eqn{-ap < 1 < aq}; these are returned as the fitted values.
+
%The distribution is motivated by the incomplete beta function
%\eqn{B_y(p,q)} which is the integral from 0 to \eqn{y} of the integrand
%\eqn{u^{p-1} (1-u)^{q-1}}{u^(p-1) (1-u)^(q-1)} where \eqn{y>0}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
Brazauskas, V. (2002)
Fisher information matrix for the Feller-Pareto distribution.
\emph{Statistics & Probability Letters},
\bold{59}, 159--167.
+
}
\author{ T. W. Yee }
@@ -97,14 +106,15 @@ If the self-starting initial values fail, try experimenting
with the initial value arguments, especially those whose
default value is not \code{NULL}.
+
Successful convergence depends on having very
good initial values. This is rather difficult for this distribution!
More improvements could be made here.
+
}
\seealso{
- \code{\link{lino}},
\code{\link{betaff}},
\code{\link{betaII}},
\code{\link{dagum}},
@@ -113,15 +123,17 @@ More improvements could be made here.
\code{\link{lomax}},
\code{\link{invlomax}},
\code{\link{paralogistic}},
- \code{\link{invparalogistic}}.
+ \code{\link{invparalogistic}},
+ \code{\link{lino}}.
+
}
\examples{
-gdata = data.frame(y = rsinmad(n=3000, 4, 6, 2)) # Not very good data!
-fit = vglm(y ~ 1, genbetaII, gdata, trace=TRUE)
-fit = vglm(y ~ 1, genbetaII(init.p=1.0, init.a=4, init.sc=7, init.q=2.3),
- gdata, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+gdata = data.frame(y = rsinmad(n = 3000, 4, 6, 2)) # Not very good data!
+fit = vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
+fit = vglm(y ~ 1, data = gdata, trace = TRUE, crit = "coef",
+ genbetaII(ishape2.p = 1, ishape1.a = 4, iscale = 7, ishape3.q = 2.3))
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index c002cd5..a0405f1 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -131,7 +131,7 @@ gdata = data.frame(x = runif(nn <- 5000))
gdata = transform(gdata, Scale = exp(1), d = exp(0 + 1.2*x),
k = exp(-1 + 2*x))
gdata = transform(gdata, y = rgengamma(nn, scale = Scale, d = d, k = k))
-fit = vglm(y ~ x, gengamma(zero = 1, iscal = 6), gdata, trace = TRUE)
+fit = vglm(y ~ x, gengamma(zero = 1, iscale = 6), gdata, trace = TRUE)
fit = vglm(y ~ x, gengamma(zero = 1), gdata, trace = TRUE, maxit = 50)
coef(fit, matrix = TRUE)
}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index c367e2c..6bc9335 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -70,7 +70,7 @@ for some data sets,
the default link for \code{llambda} is not always appropriate.
-An ordinary Poisson distribution corresponds to \eqn{\lambda=0}{lambda=0}.
+An ordinary Poisson distribution corresponds to \eqn{\lambda = 0}{lambda = 0}.
The mean (returned as the fitted values) is
\eqn{E(Y) = \theta / (1 - \lambda)}
and the variance is \eqn{\theta / (1 - \lambda)^3}.
@@ -118,7 +118,7 @@ New York: Marcel Dekker.
}
\examples{
gdata = data.frame(x2 = runif(nn <- 200))
-gdata = transform(gdata, y = rpois(nn, exp(2-x2))) # Ordinary Poisson data
+gdata = transform(gdata, y = rpois(nn, exp(2 - x2))) # Ordinary Poisson data
fit = vglm(y ~ x2, genpoisson(zero = 1), gdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
diff --git a/man/geometric.Rd b/man/geometric.Rd
index d5bbd1f..25d1fcb 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -6,19 +6,16 @@
Maximum likelihood estimation for the geometric distribution.
}
\usage{
-geometric(link = "logit", earg=list(), expected = TRUE, imethod = 1)
+geometric(link = "logit", earg = list(), expected = TRUE, imethod = 1,
+ iprob = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Parameter link function applied to the
+ \item{link, earg}{
+ Parameter link function and extra argument applied to the
parameter \eqn{p}{prob}, which lies in the unit interval.
- See \code{\link{Links}} for more choices.
-
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
+ See \code{\link{Links}} for more choices,
+ and \code{earg} in \code{\link{Links}} for general information.
}
\item{expected}{
@@ -32,6 +29,11 @@ geometric(link = "logit", earg=list(), expected = TRUE, imethod = 1)
If failure to converge occurs try another value.
}
+ \item{iprob}{
+ Optional initial value.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+
+ }
}
\details{
@@ -46,6 +48,9 @@ geometric(link = "logit", earg=list(), expected = TRUE, imethod = 1)
and its variance is \eqn{Var(Y) = (1-p)/p^2}{Var(Y) = (1-prob)/prob^2}.
The geometric distribution is a special case of the
negative binomial distribution (see \code{\link{negbinomial}}).
+ If \eqn{Y} has a geometric distribution with parameter \eqn{p}{prob} then
+ \eqn{Y+1} has a positive-geometric distribution with the same parameter.
+
}
\value{
@@ -53,11 +58,14 @@ geometric(link = "logit", earg=list(), expected = TRUE, imethod = 1)
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
+
}
\author{ T. W. Yee }
@@ -70,9 +78,11 @@ geometric(link = "logit", earg=list(), expected = TRUE, imethod = 1)
\code{\link[stats]{Geometric}},
\code{\link{betageometric}},
\code{\link{expgeometric}},
+ \code{\link{zageometric}},
\code{\link{zigeometric}},
\code{\link{rbetageom}}.
+
}
\examples{
gdata = data.frame(x2 = runif(nn <- 1000) - 0.5)
diff --git a/man/golf.Rd b/man/golf.Rd
index 365ce4e..2071f3e 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -29,9 +29,9 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
If \code{golf()} is used as the link function in
\code{\link{cumulative}} then, if the cutpoints are known, then
one should choose
- \code{reverse=TRUE, parallel=TRUE, intercept.apply=TRUE}.
+ \code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
If the cutpoints are unknown, then choose
- \code{reverse=TRUE, parallel=TRUE, intercept.apply=FALSE}.
+ \code{reverse = TRUE, parallel = TRUE, intercept.apply = FALSE}.
}
\item{inverse}{
@@ -61,22 +61,27 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
an ordinal response coming from an underlying 2-parameter gamma
distribution.
+
The arguments \code{short} and \code{tag} are used only if
\code{theta} is character.
+
See \code{\link{Links}} for general information about \pkg{VGAM}
link functions.
+
}
\value{
- See Yee (2007) for details.
+ See Yee (2012) for details.
+
}
\references{
- Yee, T. W. (2007)
+ Yee, T. W. (2012)
\emph{Ordinal ordination with normalizing link functions for count data},
(in preparation).
+
}
\author{ Thomas W. Yee }
@@ -88,11 +93,13 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
\code{theta} is too close to 1 or 0,
numerical instabilities may still arise.
+
In terms of the threshold approach with cumulative probabilities for
an ordinal response this link function corresponds to the
gamma distribution (see \code{\link{gamma2}}) that has been
recorded as an ordinal response using known cutpoints.
+
}
\section{Warning }{
Prediction may not work on \code{\link{vglm}} or
@@ -107,24 +114,25 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
\code{\link{nbolf}},
\code{\link{cumulative}}.
+
}
\examples{
-earg = list(lambda=1)
-golf("p", earg=earg, short=FALSE)
-golf("p", earg=earg, tag=TRUE)
+earg = list(lambda = 1)
+golf("p", earg = earg, short = FALSE)
+golf("p", earg = earg, tag = TRUE)
-p = seq(0.02, 0.98, len=201)
-y = golf(p, earg=earg)
-y. = golf(p, earg=earg, deriv=1)
-max(abs(golf(y, earg=earg, inv=TRUE) - p)) # Should be 0
+p = seq(0.02, 0.98, len = 201)
+y = golf(p, earg = earg)
+y. = golf(p, earg = earg, deriv = 1)
+max(abs(golf(y, earg = earg, inv = TRUE) - p)) # Should be 0
\dontrun{
-par(mfrow=c(2,1), las=1)
-plot(p, y, type="l", col="blue", main="golf()")
-abline(h=0, v=0.5, col="red", lty="dashed")
+par(mfrow=c(2,1), las = 1)
+plot(p, y, type = "l", col = "blue", main = "golf()")
+abline(h=0, v=0.5, col = "red", lty = "dashed")
-plot(p, y., type="l", col="blue",
- main="(Reciprocal of) first GOLF derivative")
+plot(p, y., type = "l", col = "blue",
+ main = "(Reciprocal of) first GOLF derivative")
}
@@ -137,18 +145,18 @@ gdata = transform(gdata, y1 = rgamma(nn, shape=lambda, scale=mymu/lambda))
cutpoints = c(-Inf, 10, 20, Inf)
gdata = transform(gdata, cuty = Cut(y1, breaks=cutpoints))
\dontrun{
-par(mfrow=c(1,1), las=1)
+par(mfrow=c(1,1), las = 1)
with(gdata, plot(x2, x3, col=cuty, pch=as.character(cuty))) }
with(gdata, table(cuty) / sum(table(cuty)))
-fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="golf",
- reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
- mv=TRUE, earg=list(cutpoint=cutpoints[2:3], lambda=lambda)),
- gdata, trace=TRUE)
+fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf",
+ reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
+ mv = TRUE, earg = list(cutpoint=cutpoints[2:3], lambda=lambda)),
+ gdata, trace = TRUE)
head(fit at y)
head(fitted(fit))
head(predict(fit))
coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
constraints(fit)
fit at misc$earg
}
@@ -164,12 +172,12 @@ fit at misc$earg
% mymu = exp( 3 + 1 * x2 - 2 * x3)
% y1 = rnbinom(nn, mu=mymu, size=shape)
% cuty = Cut(y1)
-% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="golf", rev=TRUE,
-% mv=TRUE, parallel=TRUE, earg=list(lambda=shape)))
+% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf", rev = TRUE,
+% mv = TRUE, parallel = TRUE, earg = list(lambda=shape)))
% coef(fit)
-% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="probit", rev=TRUE,
-% mv=TRUE, parallel=TRUE))
-% coef(fit, matrix=TRUE)
+% fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "probit", rev = TRUE,
+% mv = TRUE, parallel = TRUE))
+% coef(fit, matrix = TRUE)
% coef(fit)
diff --git a/man/gpd.Rd b/man/gpd.Rd
index f491292..1d9c300 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -224,8 +224,8 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
\examples{
# Simulated data from an exponential distribution (xi = 0)
threshold = 0.5
-gdata = data.frame(y = threshold + rexp(n = 3000, rate = 2))
-fit = vglm(y ~ 1, gpd(threshold = threshold), gdata, trace = TRUE)
+gdata = data.frame(y1 = threshold + rexp(n = 3000, rate = 2))
+fit = vglm(y1 ~ 1, gpd(threshold = threshold), gdata, trace = TRUE)
head(fitted(fit))
coef(fit, matrix = TRUE) # xi should be close to 0
Coef(fit)
@@ -234,29 +234,28 @@ summary(fit)
fit at extra$threshold # Note the threshold is stored here
# Check the 90 percentile
-ii = fit at y < fitted(fit)[1,"90\%"]
-100*table(ii)/sum(table(ii)) # Should be 90%
+ii = depvar(fit) < fitted(fit)[1, "90\%"]
+100 * table(ii) / sum(table(ii)) # Should be 90%
# Check the 95 percentile
-ii = fit at y < fitted(fit)[1,"95\%"]
-100*table(ii)/sum(table(ii)) # Should be 95%
+ii = depvar(fit) < fitted(fit)[1, "95\%"]
+100 * table(ii) / sum(table(ii)) # Should be 95%
-\dontrun{ plot(fit at y, col = "blue", las = 1,
+\dontrun{ plot(depvar(fit), col = "blue", las = 1,
main = "Fitted 90\% and 95\% quantiles")
-matlines(1:length(fit at y), fitted(fit), lty = 2:3, lwd = 2) }
+matlines(1:length(depvar(fit)), fitted(fit), lty = 2:3, lwd = 2) }
# Another example
-threshold = 0
gdata = data.frame(x2 = runif(nn <- 2000))
-xi = exp(-0.8) - 0.5
-gdata = transform(gdata, y = rgpd(nn, scale = exp(1+0.1*x2), shape = xi))
-fit = vglm(y ~ x2, gpd(threshold), gdata, trace = TRUE)
+threshold = 0; xi = exp(-0.8) - 0.5
+gdata = transform(gdata, y2 = rgpd(nn, scale = exp(1+0.1*x2), shape = xi))
+fit = vglm(y2 ~ x2, gpd(threshold), gdata, trace = TRUE)
coef(fit, matrix = TRUE)
\dontrun{ # Nonparametric fits
-gdata = transform(gdata, yy = y + rnorm(nn, sd = 0.1))
+gdata = transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
# Not so recommended:
fit1 = vgam(yy ~ s(x2), gpd(threshold), gdata, trace = TRUE)
par(mfrow = c(2,1))
diff --git a/man/usgrain.Rd b/man/grain.us.Rd
similarity index 87%
rename from man/usgrain.Rd
rename to man/grain.us.Rd
index 33afbfb..a0a2e0c 100644
--- a/man/usgrain.Rd
+++ b/man/grain.us.Rd
@@ -1,12 +1,12 @@
-\name{usgrain}
-\alias{usgrain}
+\name{grain.us}
+\alias{grain.us}
\docType{data}
\title{Grain Prices Data in USA }
\description{
A 4-column matrix.
}
-\usage{data(usgrain)}
+\usage{data(grain.us)}
\format{
The columns are:
\describe{
@@ -39,7 +39,7 @@ Nested reduced-rank autoregressive models for multiple time series.
}
\examples{
-cgrain = scale(usgrain, scale = FALSE) # Center the time series only
+cgrain = scale(grain.us, scale = FALSE) # Center the time series only
fit = vglm(cgrain ~ 1, rrar(Rank = c(4, 1)),
eps = 1e-3, step = 0.5, trace = TRUE, maxit = 40)
summary(fit)
diff --git a/man/grc.Rd b/man/grc.Rd
index a782d3e..04ac925 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -12,8 +12,9 @@
grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
szero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
rcam(y, family = poissonff, Rank = 0, Musual = NULL,
- weights = NULL, Index.corner = if (!Rank) NULL else
- 1 + Musual * (1:Rank), rprefix = "Row.", cprefix = "Col.",
+ weights = NULL, which.lp = 1,
+ Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
+ rprefix = "Row.", cprefix = "Col.", offset = 0,
szero = if (!Rank) NULL else {
if (Musual == 1) 1 else setdiff(1:(Musual * ncol(y)),
c(1 + (1:ncol(y)) * Musual, Index.corner))
@@ -32,8 +33,9 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
}
\item{family}{
A \pkg{VGAM} family function.
- The first linear/additive predictor is fitted using main effects plus
- an optional rank-\code{Rank} interaction term.
+ By default, the first linear/additive predictor is fitted
+ using main effects plus an optional rank-\code{Rank}
+ interaction term.
Not all family functions are suitable or make sense.
All other linear/additive predictors are fitted using an intercept-only,
so it has a common value over all rows and columns.
@@ -67,6 +69,14 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
}
+ \item{which.lp}{
+ Single integer.
+ Specifies which linear predictor is modelled as the sum of
+ an intercept, row effect, column effect plus an optional interaction term.
+ It should be one value from the set \code{1:Musual}.
+
+
+ }
\item{Index.corner}{
A vector of \code{Rank} integers.
These are used to store the \code{Rank} by \code{Rank}
@@ -80,6 +90,11 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
For labelling the indicator variables.
}
+ \item{offset}{
+ Numeric. Either a matrix of the right dimension, else
+ a single numeric expanded into such a matrix.
+
+ }
\item{szero}{
An integer from the set \{1,\ldots,\code{min(nrow(y), ncol(y))}\},
specifying the row that is used as the structural zero.
@@ -118,7 +133,7 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
}
\details{
Goodman's RC association model fits a reduced-rank approximation
- to a table of counts. The log of each cell mean is decomposed as an
+ to a table of counts. The log of each cell mean is decomposed as an
intercept plus a row effect plus a column effect plus a reduced-rank
component. The latter can be collectively written \code{A \%*\% t(C)},
the product of two `thin' matrices.
@@ -126,7 +141,7 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
By default, the first column and row of the interaction matrix
\code{A \%*\% t(C)} is chosen
to be structural zeros, because \code{szero = 1}.
-This means the first row of \code{A} are all zeros.
+This means the first row of \code{A} is all zeros.
This function uses \code{options()$contrasts} to set up the row and
@@ -177,6 +192,11 @@ Reduced-rank vector generalized linear models.
\bold{3}, 15--41.
+Yee, T. W. and Hadi, A. F. (2012)
+Row-column association models.
+\emph{In preparation}.
+
+
Goodman, L. A. (1981)
Association models and canonical correlation in the analysis
of cross-classifications having ordered categories.
@@ -227,20 +247,22 @@ assistance from Alfian F. Hadi.
may have bugs.
Quite a lot of expertise is needed when fitting and in its
interpretation thereof. For example, the constraint
- matrices applies the reduced-rank regression to the first linear
- predictor and the other linear predictors are intercept-only and
+ matrices applies the reduced-rank regression to the first
+ (see \code{which.lp})
+ linear predictor and the other linear predictors are intercept-only and
have a common value throughout the entire data set.
- This means that \code{family =} \code{\link{zipoissonff}} is
+ This means that, by default, \code{family =} \code{\link{zipoissonff}} is
appropriate but not
\code{family =} \code{\link{zipoisson}}.
+ Else set \code{family =} \code{\link{zipoisson}} and \code{which.lp = 2}.
To understand what is going on, do examine the constraint
- matrices of the fitted object, and reconcile this with Equations
- (4.3) to (4.5) of Yee and Hastie (2003).
+ matrices of the fitted object, and reconcile this with
+ Equations (4.3) to (4.5) of Yee and Hastie (2003).
The functions temporarily create a permanent data frame
called \code{.grc.df} or \code{.rcam.df}, which used
- to be needed by \code{summary.rrvglm()}. Then these
+ to be needed by \code{summary.rrvglm()}. Then these
data frames are deleted before exiting the function.
If an error occurs, then the data frames may be present
in the workspace.
@@ -264,11 +286,11 @@ assistance from Alfian F. Hadi.
\code{\link{olympic}},
\code{\link{poissonff}}.
+
}
\examples{
-# Some undergraduate student enrolments at the University of Auckland in 1990
-grc1 <- grc(auuc)
+grc1 <- grc(auuc) # Undergraduate enrolments at Auckland University in 1990
fitted(grc1)
summary(grc1)
@@ -281,7 +303,7 @@ summary(grc2)
top10 <- head(olympic, n = 10)
oly1 <- with(top10, grc(cbind(gold, silver, bronze)))
round(fitted(oly1))
-round(resid(oly1, type = "response"), dig = 1) # Response residuals
+round(resid(oly1, type = "response"), dig = 1) # Response residuals
summary(oly1)
Coef(oly1)
@@ -289,7 +311,7 @@ Coef(oly1)
# Roughly median polish
rcam0 <- rcam(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
round(fitted(rcam0), dig = 0)
-round(100 * (fitted(rcam0) - auuc) / auuc, dig = 0) # Discrepancy
+round(100 * (fitted(rcam0) - auuc) / auuc, dig = 0) # Discrepancy
rcam0 at y
round(coef(rcam0, matrix = TRUE), dig = 2)
print(Coef(rcam0, matrix = TRUE), dig = 3)
@@ -299,7 +321,7 @@ names(constraints(rcam0))
# Compare with medpolish():
(med.a <- medpolish(auuc))
fv <- med.a$overall + outer(med.a$row, med.a$col, "+")
-round(100 * (fitted(rcam0) - fv) / fv) # Hopefully should be all 0s
+round(100 * (fitted(rcam0) - fv) / fv) # Hopefully should be all 0s
}
\keyword{models}
\keyword{regression}
diff --git a/man/gumbelIbiv.Rd b/man/gumbelIbiv.Rd
index 5f4399e..ea037c2 100644
--- a/man/gumbelIbiv.Rd
+++ b/man/gumbelIbiv.Rd
@@ -64,7 +64,7 @@ gumbelIbiv(lapar="identity", earg=list(), iapar=NULL, imethod=1)
Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
\author{ T. W. Yee }
@@ -85,9 +85,9 @@ Hoboken, N.J.: Wiley-Interscience.
\examples{
nn = 1000
gdata = data.frame(y1 = rexp(nn), y2 = rexp(nn))
-\dontrun{with(gdata, plot(cbind(y1,y2)))}
-fit = vglm(cbind(y1,y2) ~ 1, fam=gumbelIbiv, gdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+\dontrun{ with(gdata, plot(cbind(y1,y2))) }
+fit = vglm(cbind(y1, y2) ~ 1, fam = gumbelIbiv, gdata, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
}
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index f2ca2e1..4aa028f 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -31,7 +31,7 @@ rgumbel(n, location = 0, scale = 1)
of the Gumbel distribution (see \bold{Details} below). }
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
diff --git a/man/hyperg.Rd b/man/hyperg.Rd
index f32dea4..0a9825e 100644
--- a/man/hyperg.Rd
+++ b/man/hyperg.Rd
@@ -10,7 +10,7 @@
}
\usage{
-hyperg(N=NULL, D=NULL, lprob="logit", earg=list(), iprob=NULL)
+hyperg(N = NULL, D = NULL, lprob = "logit", earg = list(), iprob = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -46,7 +46,7 @@ hyperg(N=NULL, D=NULL, lprob="logit", earg=list(), iprob=NULL)
\details{
Consider the scenario from
- \code{\link[stats:Hypergeometric]{Hypergeometric}} where there
+ \code{\link[stats]{dhyper}} where there
are \eqn{N=m+n} balls in an urn, where \eqn{m} are white and \eqn{n}
are black. A simple random sample (i.e., \emph{without} replacement) of
\eqn{k} balls is taken.
@@ -58,6 +58,7 @@ hyperg(N=NULL, D=NULL, lprob="logit", earg=list(), iprob=NULL)
The parameter to be estimated is the population proportion of
white balls, viz. \eqn{prob = m/(m+n)}.
+
Depending on which one of \code{N} and \code{D} is inputted, the
estimate of the other parameter can be obtained from the equation
\eqn{prob = m/(m+n)}, or equivalently, \code{prob = D/N}. However,
@@ -68,6 +69,7 @@ hyperg(N=NULL, D=NULL, lprob="logit", earg=list(), iprob=NULL)
i.e., at \code{trunc(Nhat)} and \code{ceiling(Nhat)} where \code{Nhat}
is the (real) estimate of \eqn{N}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -78,26 +80,33 @@ hyperg(N=NULL, D=NULL, lprob="logit", earg=list(), iprob=NULL)
\code{\link{cqo}},
and \code{\link{cao}}.
+
}
\references{
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
+
}
\author{ Thomas W. Yee }
\note{
- The response can be of one of three formats: a factor (first level taken
- as success), a vector of proportions of success, or a 2-column matrix
- (first column = successes) of counts. The argument \code{weights}
- in the modelling function can also be specified. In particular, for a
- general vector of proportions, you will need to specify \code{weights}
- because the number of trials is needed.
+ The response can be of one of three formats: a factor (first
+ level taken as success), a vector of proportions of success,
+ or a 2-column matrix (first column = successes) of counts.
+ The argument \code{weights} in the modelling function can also be
+ specified. In particular, for a general vector of proportions,
+ you will need to specify \code{weights} because the number of
+ trials is needed.
+
}
\seealso{
- \code{\link[stats:Hypergeometric]{Hypergeometric}},
+ \code{\link[stats]{dhyper}},
\code{\link{binomialff}}.
+
+
}
\section{Warning }{
No checking is done to ensure that certain values are within range,
@@ -108,20 +117,20 @@ New York: Wiley-Interscience, Third edition.
\examples{
nn = 100
m = 5 # number of white balls in the population
-k = rep(4, len=nn) # sample sizes
+k = rep(4, len = nn) # sample sizes
n = 4 # number of black balls in the population
-y = rhyper(nn=nn, m=m, n=n, k=k)
+y = rhyper(nn = nn, m = m, n = n, k = k)
yprop = y / k # sample proportions
# N is unknown, D is known. Both models are equivalent:
-fit = vglm(cbind(y,k-y) ~ 1, hyperg(D=m), trace=TRUE, crit="c")
-fit = vglm(yprop ~ 1, hyperg(D=m), weight=k, trace=TRUE, crit="c")
+fit = vglm(cbind(y,k-y) ~ 1, hyperg(D = m), trace = TRUE, crit = "c")
+fit = vglm(yprop ~ 1, hyperg(D=m), weight = k, trace = TRUE, crit = "c")
# N is known, D is unknown. Both models are equivalent:
-fit = vglm(cbind(y,k-y) ~ 1, hyperg(N=m+n), trace=TRUE, crit="l")
-fit = vglm(yprop ~ 1, hyperg(N=m+n), weight=k, trace=TRUE, crit="l")
+fit = vglm(cbind(y,k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l")
+fit = vglm(yprop ~ 1, hyperg(N = m+n), weight = k, trace = TRUE, crit = "l")
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit) # Should be equal to the true population proportion
unique(m / (m+n)) # The true population proportion
fit at extra
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index b4d8d6c..a9fbb47 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -32,7 +32,7 @@ rhzeta(n, alpha)
}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
}
diff --git a/man/iam.Rd b/man/iam.Rd
index 2fa71e6..0f0898d 100644
--- a/man/iam.Rd
+++ b/man/iam.Rd
@@ -9,7 +9,7 @@
}
\usage{
-iam(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
+iam(j, k, M, hbw = M, both = FALSE, diag = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -37,7 +37,7 @@ iam(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
See below for more details.
}
- \item{diagonal}{
+ \item{diag}{
Logical. Return the indices for the diagonal elements?
If \code{FALSE} then only the strictly upper triangular part of the matrix
elements are used.
@@ -66,26 +66,30 @@ iam(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
This function has a dual purpose depending on the value of \code{both}.
If \code{both=FALSE} then the column number corresponding
to the \code{j}-\code{k} element of the matrix is returned.
- If \code{both=TRUE} then \code{j} and \code{k} are ignored and a list
+ If \code{both = TRUE} then \code{j} and \code{k} are ignored and a list
with the following components are returned.
+
\item{row.index}{
The row indices of the upper triangular part of the
matrix (This may or may not include the diagonal elements, depending
on the argument \code{diagonal}).
+
}
\item{col.index}{
The column indices of the upper triangular part of the
matrix (This may or may not include the diagonal elements, depending
on the argument \code{diagonal}).
+
}
}
\references{
The website \url{http://www.stat.auckland.ac.nz/~yee} contains
some additional information.
+
}
\author{ T. W. Yee }
\note{
@@ -94,25 +98,28 @@ iam(j, k, M, hbw = M, both = FALSE, diagonal = TRUE)
whose \eqn{M} is determined by the data, e.g., \code{\link{dirichlet}},
\code{\link{multinomial}}.
+
}
\seealso{
\code{\link{vglmff-class}}.
%\code{ima}.
+
+
}
\examples{
-iam(1, 2, M=3) # The 4th column represents element (1,2) of a 3x3 matrix
-iam(NULL, NULL, M=3, both=TRUE) # Return the row and column indices
+iam(1, 2, M = 3) # The 4th column represents element (1,2) of a 3x3 matrix
+iam(NULL, NULL, M = 3, both = TRUE) # Return the row and column indices
dirichlet()@weight
M = 4
-temp1 = iam(NA, NA, M=M, both=TRUE)
+temp1 = iam(NA, NA, M = M, both = TRUE)
mat1 = matrix(NA, M, M)
mat1[cbind(temp1$row, temp1$col)] = 1:length(temp1$row)
mat1 # More commonly used
-temp2 = iam(NA, NA, M=M, both=TRUE, diagonal=FALSE)
+temp2 = iam(NA, NA, M = M, both = TRUE, diag = FALSE)
mat2 = matrix(NA, M, M)
mat2[cbind(temp2$row, temp2$col)] = 1:length(temp2$row)
mat2 # Rarely used
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index bf9b0f3..82aa5ff 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -53,6 +53,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
\eqn{\eta_2=\log(\lambda)}{eta2=log(lambda)}.
The mean is returned as the fitted values.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -60,6 +61,7 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
@@ -80,6 +82,7 @@ New York: Wiley-Interscience, Third edition.
function is different from that approach in that it estimates both
parameters by full maximum likelihood estimation.
+
}
\seealso{
@@ -87,18 +90,20 @@ New York: Wiley-Interscience, Third edition.
\code{\link{wald}},
\code{\link{bisa}}.
+
The \R{} package \pkg{SuppDists} has several functions for evaluating
the density, distribution function, quantile function and generating
random numbers from the inverse Gaussian distribution.
+
}
\examples{
-idat <- data.frame(x2 = runif(nn <- 1000))
-idat <- transform(idat, mymu = exp(2 + 1 * x2),
- Lambda = exp(2 + 1 * x2))
-idat <- transform(idat, y = rinv.gaussian(nn, mu = mymu, lambda = Lambda))
-fit1 <- vglm(y ~ x2, inv.gaussianff, idat, trace = TRUE)
-rrig <- rrvglm(y ~ x2, inv.gaussianff, idat, trace = TRUE)
+idata <- data.frame(x2 = runif(nn <- 1000))
+idata <- transform(idata, mymu = exp(2 + 1 * x2),
+ Lambda = exp(2 + 1 * x2))
+idata <- transform(idata, y = rinv.gaussian(nn, mu = mymu, lambda = Lambda))
+fit1 <- vglm(y ~ x2, inv.gaussianff, idata, trace = TRUE)
+rrig <- rrvglm(y ~ x2, inv.gaussianff, idata, trace = TRUE)
coef(fit1, matrix = TRUE)
coef(rrig, matrix = TRUE)
Coef(rrig)
diff --git a/man/invlomax.Rd b/man/invlomax.Rd
index 9807fc8..32400e7 100644
--- a/man/invlomax.Rd
+++ b/man/invlomax.Rd
@@ -7,25 +7,25 @@
inverse Lomax distribution.
}
\usage{
-invlomax(link.scale = "loge", link.p = "loge",
- earg.scale=list(), earg.p=list(),
- init.scale = NULL, init.p = 1, zero = NULL)
+invlomax(lscale = "loge", lshape2.p = "loge",
+ escale = list(), eshape2.p = list(),
+ iscale = NULL, ishape2.p = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.scale, link.p}{
+ \item{lscale, lshape2.p}{
Parameter link functions applied to the
(positive) scale parameter \code{scale} and
(positive) shape parameter \code{p}.
See \code{\link{Links}} for more choices.
}
- \item{earg.scale, earg.p}{
+ \item{escale, eshape2.p}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.scale, init.p}{
+ \item{iscale, ishape2.p}{
Optional initial values for \code{scale} and \code{p}.
}
@@ -52,7 +52,9 @@ The inverse Lomax distribution has density
for \eqn{b > 0}, \eqn{p > 0}, \eqn{y > 0}.
Here, \eqn{b} is the scale parameter \code{scale},
and \code{p} is a shape parameter.
-The mean does not seem to exist.
+The mean does not exist; \code{NA}s are returned as the fitted values.
+
+
}
@@ -61,12 +63,15 @@ The mean does not seem to exist.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
@@ -76,6 +81,7 @@ If the self-starting initial values fail, try experimenting
with the initial value arguments, especially those whose
default value is not \code{NULL}.
+
}
\seealso{
@@ -88,15 +94,19 @@ default value is not \code{NULL}.
\code{\link{lomax}},
\code{\link{paralogistic}},
\code{\link{invparalogistic}}.
+
+
}
\examples{
-y = rinvlomax(n=2000, 6, 2)
-fit = vglm(y ~ 1, invlomax, trace=TRUE)
-fit = vglm(y ~ 1, invlomax, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+idata = data.frame(y = rinvlomax(n = 2000, 6, 2))
+fit = vglm(y ~ 1, invlomax, idata, trace = TRUE)
+fit = vglm(y ~ 1, invlomax, idata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
\keyword{models}
\keyword{regression}
+
+
diff --git a/man/InvlomaxUC.Rd b/man/invlomaxUC.Rd
similarity index 75%
rename from man/InvlomaxUC.Rd
rename to man/invlomaxUC.Rd
index 839228e..d60bb88 100644
--- a/man/InvlomaxUC.Rd
+++ b/man/invlomaxUC.Rd
@@ -12,21 +12,21 @@
}
\usage{
-dinvlomax(x, scale=1, p.arg, log = FALSE)
-pinvlomax(q, scale=1, p.arg)
-qinvlomax(p, scale=1, p.arg)
-rinvlomax(n, scale=1, p.arg)
+dinvlomax(x, scale = 1, shape2.p, log = FALSE)
+pinvlomax(q, scale = 1, shape2.p)
+qinvlomax(p, scale = 1, shape2.p)
+rinvlomax(n, scale = 1, shape2.p)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{p.arg}{shape parameter.}
+ \item{shape2.p}{shape parameter.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -36,30 +36,39 @@ rinvlomax(n, scale=1, p.arg)
\code{pinvlomax} gives the distribution function,
\code{qinvlomax} gives the quantile function, and
\code{rinvlomax} generates random deviates.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{invlomax}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
+
}
\note{
The inverse Lomax distribution is a special case of the 4-parameter
generalized beta II distribution.
+
+
}
\seealso{
\code{\link{invlomax}},
\code{\link{genbetaII}}.
+
+
}
\examples{
-y = rinvlomax(n=1000, 6, 2)
-fit = vglm(y ~ 1, invlomax, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+idata = data.frame(y = rinvlomax(n = 1000, 6, 2))
+fit = vglm(y ~ 1, invlomax, idata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/invparalogistic.Rd b/man/invparalogistic.Rd
index f52df9f..1dbeb2c 100644
--- a/man/invparalogistic.Rd
+++ b/man/invparalogistic.Rd
@@ -7,25 +7,25 @@
inverse paralogistic distribution.
}
\usage{
-invparalogistic(link.a = "loge", link.scale = "loge",
- earg.a=list(), earg.scale=list(),
- init.a = 1, init.scale = NULL, zero = NULL)
+invparalogistic(lshape1.a = "loge", lscale = "loge",
+ eshape1.a = list(), escale = list(),
+ ishape1.a = 2, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale}{
+ \item{lshape1.a, lscale}{
Parameter link functions applied to the
(positive) shape parameter \code{a} and
(positive) scale parameter \code{scale}.
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale}{
+ \item{eshape1.a, escale}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale}{
+ \item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
}
@@ -54,7 +54,7 @@ and \eqn{a} is the shape parameter.
The mean is
\deqn{E(Y) = b \, \Gamma(a + 1/a) \, \Gamma(1 - 1/a) / \Gamma(a)}{%
E(Y) = b gamma(a + 1/a) gamma(1 - 1/a) / gamma(a)}
-provided \eqn{a > 1}.
+provided \eqn{a > 1}; these are returned as the fitted values.
}
@@ -63,12 +63,15 @@ provided \eqn{a > 1}.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
@@ -78,6 +81,7 @@ If the self-starting initial values fail, try experimenting
with the initial value arguments, especially those whose
default value is not \code{NULL}.
+
}
\seealso{
@@ -90,14 +94,16 @@ default value is not \code{NULL}.
\code{\link{invlomax}},
\code{\link{lomax}},
\code{\link{paralogistic}}.
+
+
}
\examples{
-y = rinvparalogistic(n=3000, 4, 6)
-fit = vglm(y ~ 1, invparalogistic, trace=TRUE)
-fit = vglm(y ~ 1, invparalogistic(init.a=2.7, init.sc=3.3),
- trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+idata = data.frame(y = rinvparalogistic(n = 3000, 4, 6))
+fit = vglm(y ~ 1, invparalogistic, idata, trace = TRUE)
+fit = vglm(y ~ 1, invparalogistic(ishape1.a = 2.7, iscale = 3.3),
+ idata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/InvparalogisticUC.Rd b/man/invparalogisticUC.Rd
similarity index 74%
rename from man/InvparalogisticUC.Rd
rename to man/invparalogisticUC.Rd
index 9ec9ada..efe5ff1 100644
--- a/man/InvparalogisticUC.Rd
+++ b/man/invparalogisticUC.Rd
@@ -12,21 +12,21 @@
}
\usage{
-dinvparalogistic(x, a, scale=1, log=FALSE)
-pinvparalogistic(q, a, scale=1)
-qinvparalogistic(p, a, scale=1)
-rinvparalogistic(n, a, scale=1)
+dinvparalogistic(x, shape1.a, scale = 1, log = FALSE)
+pinvparalogistic(q, shape1.a, scale = 1)
+qinvparalogistic(p, shape1.a, scale = 1)
+rinvparalogistic(n, shape1.a, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{a}{shape parameter.}
+ \item{shape1.a}{shape parameter.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -36,30 +36,40 @@ rinvparalogistic(n, a, scale=1)
\code{pinvparalogistic} gives the distribution function,
\code{qinvparalogistic} gives the quantile function, and
\code{rinvparalogistic} generates random deviates.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{invparalogistic}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
+
}
\note{
The inverse paralogistic distribution is a special case of the 4-parameter
generalized beta II distribution.
+
+
}
\seealso{
\code{\link{invparalogistic}},
\code{\link{genbetaII}}.
+
+
}
\examples{
-y = rinvparalogistic(n=3000, 4, 6)
-fit = vglm(y ~ 1, invparalogistic(init.a=2.1), trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+idata = data.frame(y = rinvparalogistic(n = 3000, 4, 6))
+fit = vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
+ idata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index c0e14a2..14eda24 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -93,14 +93,14 @@ contains further information and examples.
\code{\link{qtplot.lmscreg}},
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
- \code{\link{bminz}},
+ \code{\link{bmi.nz}},
\code{\link{amlexponential}}.
}
\examples{
# This converges, but deplot(fit) and qtplot(fit) do not work
-fit0 = vglm(BMI ~ bs(age, df = 4), lms.bcg, bminz, trace = TRUE)
+fit0 = vglm(BMI ~ bs(age, df = 4), lms.bcg, bmi.nz, trace = TRUE)
coef(fit0, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 1))
@@ -109,16 +109,16 @@ plotvgam(fit0, se = TRUE) # Plot mu function (only)
# Use a trick: fit0 is used for initial values for fit1.
fit1 = vgam(BMI ~ s(age, df = c(4, 2)), etastart = predict(fit0),
- lms.bcg(zero = 1), bminz, trace = TRUE)
+ lms.bcg(zero = 1), bmi.nz, trace = TRUE)
# Difficult to get a model that converges.
# Here, we prematurely stop iterations because it fails near the solution.
fit2 = vgam(BMI ~ s(age, df = c(4, 2)), maxit = 4,
- lms.bcg(zero = 1, ilam = 3), bminz, trace = TRUE)
+ lms.bcg(zero = 1, ilam = 3), bmi.nz, trace = TRUE)
summary(fit1)
head(predict(fit1))
head(fitted(fit1))
-head(bminz)
+head(bmi.nz)
# Person 1 is near the lower quartile of BMI amongst people his age
head(cdf(fit1))
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 676c82b..8e3f566 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -141,6 +141,7 @@ viz. \code{zero = c(1,3)}.
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Cole, T. J. and Green, P. J. (1992)
@@ -202,10 +203,12 @@ contains further information and examples.
stopping the iterations by assigning \code{maxits} to be the iteration
number corresponding to the highest likelihood value.
+
One trick is to fit a simple model and use it to provide
initial values for a more complex model; see in the
examples below.
+
}
\seealso{
\code{\link{lms.bcg}},
@@ -213,7 +216,7 @@ contains further information and examples.
\code{\link{qtplot.lmscreg}},
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
- \code{\link{bminz}},
+% \code{\link{bmi.nz}},
\code{\link{alaplace1}},
\code{\link{amlnormal}},
\code{\link{denorm}},
@@ -222,23 +225,31 @@ contains further information and examples.
}
\examples{
-fit = vgam(BMI ~ s(age, df = c(4,2)), lms.bcn(zero = 1), bminz, trace = TRUE)
+mysubset = subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
+mysubset = transform(mysubset, BMI = weight / height^2)
+BMIdata = mysubset[, c("age", "BMI")]
+BMIdata = na.omit(BMIdata)
+BMIdata = subset(BMIdata, BMI < 80 & age < 65) # Delete an outlier
+summary(BMIdata)
+
+fit = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata, trace = TRUE)
+
head(predict(fit))
head(fitted(fit))
-head(bminz)
-head(cdf(fit)) # Person 1 is near lower BMI quartile amongst his age group
-colMeans(c(fit at y) < fitted(fit)) # Sample proportions below the quantiles
+head(BMIdata)
+head(cdf(fit)) # Person 56 is probably overweight, given his age
+colMeans(c(depvar(fit)) < fitted(fit)) # Sample proportions below the quantiles
# Convergence problems? Try this trick: fit0 is a simpler model used for fit1
-fit0 = vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), bminz, trace = TRUE)
-fit1 = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), bminz,
+fit0 = vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), BMIdata, trace = TRUE)
+fit1 = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata,
etastart = predict(fit0), trace = TRUE)
\dontrun{
# Quantile plot
par(bty = "l", mar = c(5, 4, 4, 3) + 0.1, xpd = TRUE)
qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles",
- xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
+ xlim = c(15, 66), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
# Density plot
ygrid = seq(15, 43, len = 100) # BMI ranges
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index 9570f19..5a9535d 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -153,15 +153,15 @@ The generic function \code{predict}, when applied to a
\code{\link{qtplot.lmscreg}},
\code{\link{deplot.lmscreg}},
\code{\link{cdf.lmscreg}},
-\code{\link{bminz}},
+\code{\link{bmi.nz}},
\code{\link{amlnormal}}.
}
\examples{
-fit = vgam(BMI ~ s(age, df = 4), lms.yjn, bminz, trace = TRUE)
+fit = vgam(BMI ~ s(age, df = 4), lms.yjn, bmi.nz, trace = TRUE)
head(predict(fit))
head(fitted(fit))
-head(bminz)
+head(bmi.nz)
# Person 1 is near the lower quartile of BMI amongst people his age
head(cdf(fit))
diff --git a/man/loge.Rd b/man/loge.Rd
index a89cc1c..f406851 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -111,7 +111,7 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\examples{
\dontrun{ loge(seq(-0.2, 0.5, by = 0.1))
-loge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin))
+ loge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin))
nloge(seq(-0.2, 0.5, by = 0.1))
nloge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin)) }
}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index e93709f..fde3f27 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -94,7 +94,7 @@ New York: Wiley-Interscience, Third edition.
Castillo, E., Hadi, A. S., Balakrishnan, N. and Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in
Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience, p.130.
+Hoboken, NJ, USA: Wiley-Interscience, p.130.
deCani, J. S. and Stine, R. A. (1986)
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index 051859b..19b5f9c 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -29,6 +29,7 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
\item{tau, kappa}{
See \code{\link{alaplace1}}.
+
}
\item{llocation}{ Character.
Parameter link functions for
@@ -42,11 +43,13 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
\code{\link{cloglog}},
etc.
+
}
\item{elocation}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
+
}
\item{ilocation}{
Optional initial values.
@@ -54,6 +57,7 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
appropriate length.
The default is to choose the value internally.
+
}
\item{parallelLocation}{ Logical.
Should the quantiles be parallel on the transformed scale
@@ -61,21 +65,25 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
Assigning this argument to \code{TRUE} circumvents the
seriously embarrassing quantile crossing problem.
+
}
% \item{sameScale}{ Logical.
% Should the scale parameters be equal? It is advised to keep
-% \code{sameScale=TRUE} unchanged because it does not make sense to
+% \code{sameScale = TRUE} unchanged because it does not make sense to
% have different values for each \code{tau} value.
% }
+
\item{imethod}{
Initialization method.
Either the value 1, 2, or \ldots.
+
}
\item{dfmu.init, shrinkage.init, Scale.arg, digt, zero}{
See \code{\link{alaplace1}}.
+
}
\item{rep0, rep01}{
Numeric, positive.
@@ -84,12 +92,13 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
by \code{rep0}; it avoids computing \code{log(0)}.
For proportions data values of the response whose value is 0 or 1
are replaced by
- \code{min(rangey01[1]/2, rep01/w[y<=0])} and
-  \code{min(rangey01[1]/2, rep01/w[y <= 0])} and
\code{max((1 + rangey01[2])/2, 1-rep01/w[y >= 1])}
respectively; e.g., it avoids computing \code{logit(0)} or \code{logit(1)}.
Here, \code{rangey01} is the 2-vector \code{range(y[(y > 0) & (y < 1)])}
of the response.
+
}
\item{minquantile, maxquantile}{
Numeric.
@@ -97,9 +106,10 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
These argument are effectively ignored by default since
\code{\link{loge}} keeps all quantiles positive.
However, if
- \code{llocation = "logoff", elocation = list(offset=1)}
+ \code{llocation = "logoff", elocation = list(offset = 1)}
then it is possible that the fitted quantiles have value 0
- because \code{minquantile=0}.
+ because \code{minquantile = 0}.
+
}
}
@@ -116,12 +126,14 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
There are many variants of ALDs and the one used here
is described in \code{\link{alaplace1}}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
In the \code{extra} slot of the fitted object are some list
components which are useful.
For example, the sample proportion of
@@ -132,6 +144,7 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
and \code{location} is a fitted quantile curve.
This definition comes about naturally from the transformed ALD data.
+
}
\references{
@@ -141,15 +154,18 @@ a revisit with applications to communications,
economics, engineering, and finance},
Boston: Birkhauser.
+
Kozubowski, T. J. and Podgorski, K. (2003)
Log-Laplace distributions.
\emph{International Mathematical Journal},
\bold{3}, 467--495.
- Yee, T. W. (2011)
+
+ Yee, T. W. (2012)
Quantile regression for counts and proportions.
In preparation.
+
}
\author{ Thomas W. Yee }
\section{Warning}{
@@ -157,6 +173,7 @@ Log-Laplace distributions.
not handle a vector of just 0s and 1s as the response;
it will only work satisfactorily if the number of trials is large.
+
See \code{\link{alaplace1}} for other warnings.
Care is needed with \code{tau} values which are too small, e.g.,
for count data the sample
@@ -165,6 +182,7 @@ Log-Laplace distributions.
which also requires all \code{tau} values to be less than the
sample proportion of ones.
+
}
\note{
The form of input for \code{\link{logitlaplace1}} as response
@@ -174,6 +192,7 @@ Log-Laplace distributions.
See Example 2 below.
See \code{\link{alaplace1}} for other notes in general.
+
}
\seealso{
@@ -187,21 +206,21 @@ Log-Laplace distributions.
set.seed(123); my.k = exp(0)
alldat = data.frame(x2 = sort(runif(n <- 500)))
mymu = function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
-alldat = transform(alldat, y = rnbinom(n, mu=mymu(x2), size=my.k))
+alldat = transform(alldat, y = rnbinom(n, mu = mymu(x2), size = my.k))
mytau = c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
-fitp = vglm(y ~ bs(x2, df=mydof), data=alldat, trace=TRUE,
- loglaplace1(tau=mytau, parallelLoc=TRUE)) # halfstepping is usual
+fitp = vglm(y ~ bs(x2, df = mydof), data=alldat, trace = TRUE,
+ loglaplace1(tau = mytau, parallelLoc = TRUE)) # halfstepping is usual
\dontrun{
-par(las=1) # Plot on a log1p() scale
+par(las = 1) # Plot on a log1p() scale
mylwd = 1.5
-with(alldat, plot(x2, jitter(log1p(y), factor=1.5), col="red", pch="o",
- main="Example 1; darkgreen=truth, blue=estimated", cex=0.75))
-with(alldat, matlines(x2, log1p(fitted(fitp)), col="blue", lty=1, lwd=mylwd))
+with(alldat, plot(x2, jitter(log1p(y), factor = 1.5), col = "red", pch = "o",
+ main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75))
+with(alldat, matlines(x2, log1p(fitted(fitp)), col = "blue", lty = 1, lwd = mylwd))
finexgrid = seq(0, 1, len=201)
for(ii in 1:length(mytau))
- lines(finexgrid, col="darkgreen", lwd=mylwd,
- log1p(qnbinom(p=mytau[ii], mu=mymu(finexgrid), si=my.k)))
+ lines(finexgrid, col = "darkgreen", lwd = mylwd,
+ log1p(qnbinom(p = mytau[ii], mu = mymu(finexgrid), si = my.k)))
}
fitp at extra # Contains useful information
@@ -209,40 +228,40 @@ fitp at extra # Contains useful information
# Example 2: sample proportions
set.seed(123); nnn = 1000; ssize = 100 # ssize = 1 will not work!
alldat = data.frame(x2 = sort(runif(nnn)))
-mymu = function(x) logit( 1.0 + 4*x, inv=TRUE)
+mymu = function(x) logit( 1.0 + 4*x, inv = TRUE)
alldat = transform(alldat, ssize = ssize,
- y2 = rbinom(nnn, size=ssize, prob=mymu(x2)) / ssize)
+ y2 = rbinom(nnn, size=ssize, prob = mymu(x2)) / ssize)
mytau = c(0.25, 0.50, 0.75)
-fit1 = vglm(y2 ~ bs(x2, df=3), data=alldat, weights=ssize, trace=TRUE,
- logitlaplace1(tau=mytau, lloc="cloglog", paral=TRUE))
+fit1 = vglm(y2 ~ bs(x2, df = 3), data=alldat, weights=ssize, trace = TRUE,
+ logitlaplace1(tau = mytau, lloc = "cloglog", paral = TRUE))
\dontrun{
# Check the solution. Note: this may be like comparing apples with oranges.
-plotvgam(fit1, se=TRUE, scol="red", lcol="blue", main="Truth = 'darkgreen'")
+plotvgam(fit1, se = TRUE, scol = "red", lcol = "blue", main = "Truth = 'darkgreen'")
# Centered approximately !
linkFunctionChar = as.character(fit1 at misc$link)
alldat = transform(alldat, trueFunction=
- theta2eta(theta=mymu(x2), link=linkFunctionChar))
-with(alldat, lines(x2, trueFunction - mean(trueFunction), col="darkgreen"))
+ theta2eta(theta = mymu(x2), link=linkFunctionChar))
+with(alldat, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen"))
# Plot the data + fitted quantiles (on the original scale)
myylim = with(alldat, range(y2))
-with(alldat, plot(x2, y2, col="blue", ylim=myylim, las=1, pch=".", cex=2.5))
-with(alldat, matplot(x2, fitted(fit1), add=TRUE, lwd=3, type="l"))
+with(alldat, plot(x2, y2, col = "blue", ylim = myylim, las = 1, pch = ".", cex=2.5))
+with(alldat, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l"))
truecol = rep(1:3, len=fit1 at misc$M) # Add the 'truth'
smallxgrid = seq(0, 1, len=501)
for(ii in 1:length(mytau))
lines(smallxgrid, col=truecol[ii], lwd=2,
- qbinom(p=mytau[ii], prob=mymu(smallxgrid), size=ssize) / ssize)
+ qbinom(p = mytau[ii], prob = mymu(smallxgrid), size=ssize) / ssize)
# Plot on the eta (== logit()/probit()/...) scale
-with(alldat, matplot(x2, predict(fit1), add=FALSE, lwd=3, type="l"))
+with(alldat, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l"))
# Add the 'truth'
for(ii in 1:length(mytau)) {
- true.quant = qbinom(p=mytau[ii], pr=mymu(smallxgrid), si=ssize)/ssize
+ true.quant = qbinom(p = mytau[ii], pr = mymu(smallxgrid), si=ssize)/ssize
lines(smallxgrid, theta2eta(theta=true.quant, link=linkFunctionChar),
col=truecol[ii], lwd=2)
}
diff --git a/man/loglog.Rd b/man/loglog.Rd
index 32f2f0d..f3e09db 100644
--- a/man/loglog.Rd
+++ b/man/loglog.Rd
@@ -63,17 +63,22 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
and if \code{inverse = TRUE} then
\code{exp(exp(theta))}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
}
\references{
- McCullagh, P. and Nelder, J. A. (1989)
- \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
\author{ Thomas W. Yee }
@@ -81,6 +86,7 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical instability may occur when \code{theta} is
close to 1 unless \code{earg} is used.
+
}
\seealso{
@@ -92,11 +98,11 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
\examples{
x = seq(0.8, 1.5, by=0.1)
loglog(x) # Has NAs
-loglog(x, earg=list(bvalue=1.0 + .Machine$double.eps)) # Has no NAs
+loglog(x, earg = list(bvalue = 1.0 + .Machine$double.eps)) # Has no NAs
-x = seq(1.01, 10, len=100)
+x = seq(1.01, 10, len = 100)
loglog(x)
-max(abs(loglog(loglog(x), inverse=TRUE) - x)) # Should be 0
+max(abs(loglog(loglog(x), inverse = TRUE) - x)) # Should be 0
}
\keyword{math}
\keyword{models}
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index 361d173..2d8610f 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -93,7 +93,7 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
diff --git a/man/lomax.Rd b/man/lomax.Rd
index a213194..8466288 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -7,23 +7,23 @@
Lomax distribution.
}
\usage{
-lomax(link.scale = "loge", link.q = "loge", earg.scale=list(),
- earg.q=list(), init.scale = NULL, init.q = 1, zero = NULL)
+lomax(lscale = "loge", lshape3.q = "loge", escale = list(),
+ eshape3.q = list(), iscale = NULL, ishape3.q = 2, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.scale, link.q}{
+ \item{lscale, lshape3.q}{
Parameter link function applied to the
(positive) parameters \code{scale} and \code{q}.
See \code{\link{Links}} for more choices.
}
- \item{earg.scale, earg.q}{
+ \item{escale, eshape3.q}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.scale, init.q}{
+ \item{iscale, ishape3.q}{
Optional initial values for \code{scale} and \code{q}.
}
@@ -57,7 +57,7 @@ The cumulative distribution function is
The mean is
\deqn{E(Y) = b/(q-1)}{%
E(Y) = b/(q-1)}
-provided \eqn{q > 1}.
+provided \eqn{q > 1}; these are returned as the fitted values.
}
@@ -66,12 +66,15 @@ provided \eqn{q > 1}.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
@@ -81,6 +84,7 @@ If the self-starting initial values fail, try experimenting
with the initial value arguments, especially those whose
default value is not \code{NULL}.
+
}
\seealso{
@@ -93,12 +97,14 @@ default value is not \code{NULL}.
\code{\link{invlomax}},
\code{\link{paralogistic}},
\code{\link{invparalogistic}}.
+
}
\examples{
-lodat = data.frame(y = rlomax(n=2000, 6, 2))
-fit = vglm(y ~ 1, lomax, lodat, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+ldata = data.frame(y = rlomax(n = 1000, exp(1), exp(1)))
+fit = vglm(y ~ 1, lomax, ldata, trace = TRUE)
+fit = vglm(y ~ 1, lomax(iscale = exp(1), ishape3.q = 2), ldata, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/LomaxUC.Rd b/man/lomaxUC.Rd
similarity index 74%
rename from man/LomaxUC.Rd
rename to man/lomaxUC.Rd
index a37c4b7..3715d01 100644
--- a/man/LomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -11,21 +11,21 @@
and shape parameter \code{q}.
}
\usage{
-dlomax(x, scale=1, q.arg, log=FALSE)
-plomax(q, scale=1, q.arg)
-qlomax(p, scale=1, q.arg)
-rlomax(n, scale=1, q.arg)
+dlomax(x, scale = 1, shape3.q, log = FALSE)
+plomax(q, scale = 1, shape3.q)
+qlomax(p, scale = 1, shape3.q)
+rlomax(n, scale = 1, shape3.q)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{q.arg}{shape parameter.}
\item{scale}{scale parameter.}
+ \item{shape3.q}{shape parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -37,28 +37,33 @@ rlomax(n, scale=1, q.arg)
\code{rlomax} generates random deviates.
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{lomax}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
}
\note{
The Lomax distribution is a special case of the 4-parameter
generalized beta II distribution.
+
}
\seealso{
\code{\link{lomax}},
\code{\link{genbetaII}}.
+
}
\examples{
-y = rlomax(n=2000, 6, 2)
-fit = vglm(y ~ 1, lomax(init.q=2.1), trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+ldata = data.frame(y = rlomax(n = 2000, 6, 2))
+fit = vglm(y ~ 1, lomax(ishape3.q = 2.1), ldata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/lrtest.Rd b/man/lrtest.Rd
new file mode 100644
index 0000000..34eaebc
--- /dev/null
+++ b/man/lrtest.Rd
@@ -0,0 +1,167 @@
+\name{lrtest}
+\alias{lrtest}
+\alias{lrtest_vglm}
+%\alias{update_formula}
+%\alias{update_default}
+\title{Likelihood Ratio Test of Nested Models}
+
+\description{
+ \code{lrtest} is a generic function for carrying out likelihood ratio tests.
+ The default method can be employed for comparing nested VGLMs
+ (see details below).
+
+}
+
+\usage{
+ lrtest(object, \dots)
+
+ lrtest_vglm(object, \dots, name = NULL)
+
+}
+%\method{lrtest}{default}(object, \dots, name = NULL)
+
+%\method{lrtest}{formula}(object, \dots, data = list())
+
+
+
+\arguments{
+ \item{object}{
+ a \code{\link{vglm}} object.
+ See below for details.
+
+
+ }
+ \item{\dots}{
+ further object specifications passed to methods.
+ See below for details.
+
+
+ }
+ \item{name}{
+ a function for extracting a suitable name/description from
+ a fitted model object.
+ By default the name is queried by calling \code{\link{formula}}.
+
+
+ }
+
+
+
+% \item{data}{
+% a data frame containing the variables in the model.
+%
+% }
+
+
+}
+
+\details{
+ \code{lrtest} is intended to be a generic function for
+ comparisons of models via asymptotic likelihood ratio
+ tests. The default method consecutively compares the
+ fitted model object \code{object} with the models passed
+ in \code{\dots}. Instead of passing the fitted model
+ objects in \code{\dots}, several other specifications
+ are possible. The updating mechanism is the same as for
+ \code{\link[lmtest]{waldtest}}: the models in \code{\dots}
+ can be specified as integers, characters (both for terms
+ that should be eliminated from the previous model),
+ update formulas or fitted model objects. Except for
+ the last case, the existence of an \code{\link[stats]{update}}
+ method is assumed. See \code{\link[lmtest]{waldtest}} for details.
+
+
+ Subsequently, an asymptotic likelihood ratio test for each
+ two consecutive models is carried out: Twice the difference
+ in log-likelihoods (as derived by the \code{\link[stats]{logLik}}
+ methods) is compared with a Chi-squared distribution.
+
+
+% The \code{"formula"} method fits a \code{\link{lm}}
+% first and then calls the default method.
+
+
+}
+
+\note{
+ The code was adapted directly from \pkg{lmtest} (written by
+ T. Hothorn, A. Zeileis, G. Millo, D. Mitchell)
+ and made to work for VGLMs and S4.
+ This help file also was adapted from \pkg{lmtest}.
+
+
+ \emph{Approximate} LRTs might be applied to VGAMs, as
+ produced by \code{\link{vgam}}, but it is probably better in inference
+ to use \code{\link{vglm}} with regression splines
+ (\code{\link[splines]{bs}} and
+ \code{\link[splines]{ns}}).
+ This methods function should not be applied to other models such as
+ those produced
+ by \code{\link{rrvglm}},
+ by \code{\link{cqo}},
+ by \code{\link{cao}}.
+
+
+}
+
+\section{Warning }{
+ Several \pkg{VGAM} family functions implement distributions
+  which do not satisfy the usual regularity conditions needed for
+ the LRT to work. No checking or warning is given for these.
+
+}
+
+
+\value{
+ An object of class \code{"VGAManova"} which contains a slot
+ with the
+ log-likelihood, degrees of freedom, the difference in
+ degrees of freedom, likelihood ratio Chi-squared statistic
+ and corresponding p value.
+ These are printed by \code{stats:::print.anova()};
+ see \code{\link[stats]{anova}}.
+
+
+}
+
+\seealso{
+ \pkg{lmtest},
+ \code{\link{vglm}}.
+
+
+% \code{\link{waldtest}}
+% \code{update_default},
+% \code{update_formula}.
+
+
+
+}
+
+\examples{
+set.seed(1)
+pneumo = transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
+fit1 = vglm(cbind(normal, mild, severe) ~ let , propodds, pneumo)
+fit2 = vglm(cbind(normal, mild, severe) ~ let + x3, propodds, pneumo)
+fit3 = vglm(cbind(normal, mild, severe) ~ let , cumulative, pneumo)
+# Various equivalent specifications of the LR test for testing x3
+(ans1 <- lrtest(fit2, fit1))
+ans2 <- lrtest(fit2, 2)
+ans3 <- lrtest(fit2, "x3")
+ans4 <- lrtest(fit2, . ~ . - x3)
+c(all.equal(ans1, ans2), all.equal(ans1, ans3), all.equal(ans1, ans4))
+
+# Doing it manually
+(testStatistic <- 2 * (logLik(fit2) - logLik(fit1)))
+(mypval <- pchisq(testStatistic, df = length(coef(fit2)) - length(coef(fit1)),
+ lower.tail = FALSE))
+
+(ans5 <- lrtest(fit3, fit1))  # Test proportional odds (parallelism) assumption
+}
+
+\keyword{htest}
+
+
+%(testStatistic <- 2 * (logLik(fit3) - logLik(fit1)))
+%(mypval <- pchisq(testStatistic, df = length(coef(fit3)) - length(coef(fit1)),
+% lower.tail = FALSE))
+
diff --git a/man/margeff.Rd b/man/margeff.Rd
index 5db3fb5..adb2649 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -41,20 +41,24 @@ margeff(object, subset = NULL)
of explanatory variables and the (hopefully) nominal response has
\eqn{M+1} levels, and there are \eqn{n} observations.
+
If
\code{is.numeric(subset)}
and
\code{length(subset) == 1} then a
\eqn{p} by \eqn{M+1} matrix is returned.
+
}
% \references{ ~put references to the literature/web site here ~ }
\author{ T. W. Yee }
\section{Warning }{
- Care is needed in interpretation, e.g., the change is not universally
- accurate for a unit change in each explanatory variable because
- eventually the `new' probabilities may become negative or greater
- than unity. Also, the `new' probabilities will not sum to one.
+ Care is needed in interpretation, e.g., the change is not
+ universally accurate for a unit change in each explanatory
+ variable because eventually the `new' probabilities may become
+ negative or greater than unity. Also, the `new' probabilities
+ will not sum to one.
+
This function is not applicable for models with
data-dependent terms such as \code{\link{bs}} and
@@ -66,6 +70,7 @@ margeff(object, subset = NULL)
The \code{formula} in \code{object} should comprise of simple terms
of the form \code{ ~ x2 + x3 + x4}, etc.
+
}
\note{
@@ -76,6 +81,7 @@ margeff(object, subset = NULL)
the \code{xij} or \code{form2} arguments,
nor \code{\link{vgam}} objects.
+
For \code{\link{multinomial}}
if \code{subset} is numeric then the function uses a \code{for} loop over
the observations (slow).
diff --git a/man/nzmarital.Rd b/man/marital.nz.Rd
similarity index 53%
rename from man/nzmarital.Rd
rename to man/marital.nz.Rd
index 7f998cc..795d679 100644
--- a/man/nzmarital.Rd
+++ b/man/marital.nz.Rd
@@ -1,5 +1,5 @@
-\name{nzmarital}
-\alias{nzmarital}
+\name{marital.nz}
+\alias{marital.nz}
\docType{data}
\title{
New Zealand Marital Data.
@@ -9,7 +9,7 @@
early 1990s.
}
-\usage{data(nzmarital)}
+\usage{data(marital.nz)}
\format{
A data frame with 6053 observations on the following 3 variables.
\describe{
@@ -20,17 +20,19 @@
}
\item{\code{mstatus}}{a factor with levels
- \code{Divorced/Separated} \code{Married/Partnered} \code{Single} \code{Widowed}.
- }
- }
+ \code{Divorced/Separated}, \code{Married/Partnered},
+ \code{Single}, \code{Widowed}.
+ } }
}
+
\details{
-This is a subset of a data set collected from a self-administered
-questionnaire administered in a large New Zealand workforce observational
-study conducted during 1992--3. The data were augmented by a second
-study consisting of retirees. The data can be considered a reasonable
-representation of the white male New Zealand population in the early
-1990s.
+This is a subset of a data set collected from a
+self-administered questionnaire administered in a large
+New Zealand workforce observational study conducted
+during 1992--3. The data were augmented by a second study
+consisting of retirees. The data can be considered a
+reasonable representation of the white male New Zealand
+population in the early 1990s.
}
@@ -41,12 +43,12 @@ representation of the white male New Zealand population in the early
}
\references{
- See \code{\link{bminz}} and \code{\link{chestnz}}.
+ See \code{\link{bmi.nz}} and \code{\link{chest.nz}}.
}
\examples{
-summary(nzmarital)
+summary(marital.nz)
}
\keyword{datasets}
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index 51e000f..8099f19 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -5,21 +5,20 @@
\description{
Estimating the parameter of the Maxwell distribution by
maximum likelihood estimation.
+
}
\usage{
maxwell(link = "loge", earg = list())
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Parameter link function applied to the parameter \eqn{a}.
- See \code{\link{Links}} for more choices.
- A log link is the default because the parameter is positive.
+ \item{link, earg}{
+ Parameter link function and extra argument
+ applied to the parameter \eqn{a}.
+ See \code{\link{Links}} for more choices and information;
+ a log link is the default because the parameter is positive.
+ More information is at \code{\link{CommonVGAMffArguments}}.
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
}
}
@@ -58,6 +57,7 @@ maxwell(link = "loge", earg = list())
A related distribution is the Rayleigh distribution.
Fisher-scoring and Newton-Raphson are the same here.
+
}
\seealso{
@@ -67,7 +67,7 @@ maxwell(link = "loge", earg = list())
}
\examples{
mdata = data.frame(y = rmaxwell(1000, a = exp(2)))
-fit = vglm(y ~ 1, maxwell, mdata, trace = TRUE, crit = "c")
+fit = vglm(y ~ 1, maxwell, mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index 3942255..49c55d4 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -58,6 +58,7 @@ rmaxwell(n, a)
\note{
The Maxwell distribution is related to the Rayleigh distribution.
+
}
\seealso{
\code{\link{maxwell}},
@@ -67,7 +68,7 @@ rmaxwell(n, a)
}
\examples{
-\dontrun{ a <- 3; x <- seq(-0.5, 3, len = 100)
+\dontrun{ a <- 3; x <- seq(-0.5, 3, length = 100)
plot(x, dmaxwell(x, a = a), type = "l", col = "blue", las = 1, ylab = "",
main = "Blue is density, orange is cumulative distribution function",
sub = "Purple lines are the 10,20,...,90 percentiles")
diff --git a/man/mbinomial.Rd b/man/mbinomial.Rd
index 6c3a568..360608a 100644
--- a/man/mbinomial.Rd
+++ b/man/mbinomial.Rd
@@ -19,16 +19,15 @@ mbinomial(mvar = NULL, link = "logit", earg = list(),
The intercept should be suppressed from the formula, and
the term must be a \code{\link[base]{factor}}.
+
}
- \item{link}{
- Parameter link function applied to the probability.
+ \item{link, earg}{
+ Parameter link function and extra argument for the probability
+ parameter.
% called \eqn{p} below.
- See \code{\link{Links}} for more choices.
+ Information for these are at \code{\link{Links}}
+ and \code{\link{CommonVGAMffArguments}}.
- }
- \item{earg}{
- List. Extra arguments for the links.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{parallel}{
@@ -45,16 +44,18 @@ mbinomial(mvar = NULL, link = "logit", earg = list(),
}
}
\details{
- By default, this \pkg{VGAM} family function fits a logistic regression
- model to a binary response from a matched case-control study. Here,
- each case \eqn{(Y = 1}) is matched with one or more controls \eqn{(Y = 0})
- with respect to some matching variables (confounders). For example,
- the first matched set is all women aged from 20 to 25, the second
- matched set is women aged between 26 to 30, etc. The logistic
- regression has a different intercept for each matched set but the other
- regression coefficients are assumed to be the same across matched sets
+ By default, this \pkg{VGAM} family function fits a logistic
+ regression model to a binary response from a matched case-control
+ study. Here, each case \eqn{(Y = 1}) is matched with one or more
+ controls \eqn{(Y = 0}) with respect to some matching variables
+ (confounders). For example, the first matched set is all women
+ aged from 20 to 25, the second matched set is women aged between
+ 26 to 30, etc. The logistic regression has a different intercept
+ for each matched set but the other regression coefficients
+ are assumed to be the same across matched sets
(\code{parallel = TRUE}).
+
Let \eqn{C} be the number of matched sets.
This \pkg{VGAM} family function uses a trick by allowing \eqn{M},
the number of linear/additive predictors, to be equal to \eqn{C},
@@ -68,61 +69,72 @@ mbinomial(mvar = NULL, link = "logit", earg = list(),
The algorithm here constructs a different constraint matrix for
each of the \eqn{C} columns.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Section 8.2 of
Hastie, T. J. and Tibshirani, R. J. (1990)
\emph{Generalized Additive Models}, London: Chapman & Hall.
+
Pregibon, D. (1984)
Data analytic methods for matched case-control studies.
\emph{Biometrics},
\bold{40},
639--651.
+
Chapter 7 of
Breslow, N. E. and Day, N. E. (1980)
\emph{Statistical Methods in Cancer Research I: The Analysis
of Case-Control Studies}.
Lyon: International Agency for Research on Cancer.
+
Holford, T. R. and White, C. and Kelsey, J. L. (1978)
Multivariate analysis for matched case-control studies.
\emph{American Journal of Epidemiology},
\bold{107}, 245--256.
+
}
\author{ Thomas W. Yee }
\note{
- The response is assumed to be in a format that can also be inputted
- into \code{\link{binomialff}}.
+ The response is assumed to be in a format that can also be
+ inputted into \code{\link{binomialff}}.
+
}
\section{Warning }{
- Both the memory requirements and computational time of this \pkg{VGAM}
- family function grows very quickly with respect to the number of
- matched sets. For example, the large model matrix of a data set with 100
- matched sets consisting of one case and one control per set will take
- up at least (about) 20Mb of memory. For a constant number of cases and controls
- per matched set, the memory requirements are \eqn{O(C^3)} and the
- the computational time is \eqn{O(C^4)} flops.
-
- The example below has been run successfully with \code{n = 700} (this
- corresponds to \eqn{C = 350}) but only on a big machine and it took over
- 10 minutes. The large model matrix was 670Mb.
+ Both the memory requirements and computational time of this
+ \pkg{VGAM} family function grows very quickly with respect
+ to the number of matched sets. For example, the large model
+ matrix of a data set with 100 matched sets consisting of one
+ case and one control per set will take up at least (about)
+ 20Mb of memory. For a constant number of cases and controls
+ per matched set, the memory requirements are \eqn{O(C^3)}
+  and the computational time is \eqn{O(C^4)} flops.
+
+
+ The example below has been run successfully with \code{n = 700}
+ (this corresponds to \eqn{C = 350}) but only on a big machine
+ and it took over 10 minutes. The large model matrix was 670Mb.
+
}
\seealso{
\code{\link{binomialff}}.
+
}
\examples{
\dontrun{
@@ -135,9 +147,9 @@ mydat = data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each = 2))
xmat = with(mydat, cbind(x2, x3))
mydat = transform(mydat, eta = -0.1 + 0.2 * x2 + 0.3 * x3)
etamat = with(mydat, matrix(eta, n/2, 2))
-condmu = exp(etamat[,1]) / (exp(etamat[,1]) + exp(etamat[,2]))
+condmu = exp(etamat[, 1]) / (exp(etamat[, 1]) + exp(etamat[, 2]))
y1 = ifelse(runif(n/2) < condmu, 1, 0)
-y = cbind(y1, 1-y1)
+y = cbind(y1, 1 - y1)
mydat = transform(mydat, y = c(y1, 1-y1),
ID = factor(c(row(etamat))))
fit = vglm(y ~ 1 + ID + x2, trace = TRUE,
@@ -152,8 +164,7 @@ objsizemb(fit) # in Mb
VLMX = model.matrix(fit, type = "vlm") # The big model matrix
dim(VLMX)
objsizemb(VLMX) # in Mb
-rm(VLMX)
-}
+rm(VLMX) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index 558e7e2..3fe6c00 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -3,8 +3,8 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{McCullagh (1989) Distribution Family Function}
\description{
- Estimates the two parameters of the McCullagh (1989) distribution by
- maximum likelihood estimation.
+ Estimates the two parameters of the McCullagh (1989)
+ distribution by maximum likelihood estimation.
}
\usage{
@@ -111,7 +111,7 @@ all else fails.
%}
\examples{
-mdata = data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu is Inf
+mdata = data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
fit = vglm(y ~ 1, mccullagh89, mdata, trace = TRUE)
head(fitted(fit))
with(mdata, mean(y))
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index fa8e03e..f40f332 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -54,8 +54,10 @@ mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
}
\details{
The probability function can be loosely written as
- \deqn{P(Y=y) = \phi\,Exponential(\lambda_1) + (1-\phi)\,Exponential(\lambda_2)}{%
- P(Y=y) = phi * Exponential(lambda1) + (1-phi) * Exponential(lambda2)}
+ \deqn{P(Y=y) = \phi\,Exponential(\lambda_1) +
+ (1-\phi)\,Exponential(\lambda_2)}{%
+ P(Y=y) = phi * Exponential(lambda1) +
+ (1-phi) * Exponential(lambda2)}
where \eqn{\phi}{phi} is the probability an observation belongs
to the first group, and \eqn{y>0}.
The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
@@ -79,10 +81,11 @@ mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
\section{Warning }{
This \pkg{VGAM} family function requires care for a successful
application.
- In particular, good initial values are required because of the presence
- of local solutions. Therefore running this function with several
- different combinations of arguments such as \code{iphi}, \code{il1},
- \code{il2}, \code{qmu} is highly recommended. Graphical methods such
+ In particular, good initial values are required because
+ of the presence of local solutions. Therefore running
+ this function with several different combinations of
+ arguments such as \code{iphi}, \code{il1}, \code{il2},
+ \code{qmu} is highly recommended. Graphical methods such
as \code{\link[graphics]{hist}} can be used as an aid.
@@ -90,14 +93,15 @@ mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
\author{ T. W. Yee }
\note{
- Fitting this model successfully to data can be difficult due to
- local solutions, uniqueness problems and ill-conditioned data. It
- pays to fit the model several times with different initial values
- and check that the best fit looks reasonable. Plotting the results is
- recommended. This function works better as \eqn{\lambda_1}{lambda1}
- and \eqn{\lambda_2}{lambda2} become more different.
- The default control argument \code{trace = TRUE} is to encourage
- monitoring convergence.
+ Fitting this model successfully to data can be
+ difficult due to local solutions, uniqueness problems
+ and ill-conditioned data. It pays to fit the model
+ several times with different initial values and check
+ that the best fit looks reasonable. Plotting the
+ results is recommended. This function works better as
+ \eqn{\lambda_1}{lambda1} and \eqn{\lambda_2}{lambda2}
+ become more different. The default control argument
+ \code{trace = TRUE} is to encourage monitoring convergence.
}
@@ -114,8 +118,8 @@ lambda1 = exp(1); lambda2 = exp(3)
(phi = logit(-1, inverse = TRUE))
mdata = data.frame(y1 = rexp(nn <- 1000, lambda1))
mdata = transform(mdata, y2 = rexp(nn, lambda2))
-mdata = transform(mdata, y = ifelse(runif(nn) < phi, y1, y2))
-fit = vglm(y ~ 1, mix2exp, mdata, trace = TRUE)
+mdata = transform(mdata, Y = ifelse(runif(nn) < phi, y1, y2))
+fit = vglm(Y ~ 1, mix2exp, mdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Compare the results with the truth
@@ -123,9 +127,9 @@ round(rbind('Estimated' = Coef(fit),
'Truth' = c(phi, lambda1, lambda2)), dig = 2)
\dontrun{# Plot the results
-with(mdata, hist(y, prob = TRUE, main = "Orange=estimate, blue=truth"))
-abline(v = 1/Coef(fit)[c(2,3)], lty = 2, col = "orange", lwd = 2)
-abline(v = 1/c(lambda1, lambda2), lty = 2, col = "blue", lwd = 2) }
+with(mdata, hist(Y, prob = TRUE, main = "Orange = estimate, blue = truth"))
+abline(v = 1 / Coef(fit)[c(2, 3)], lty = 2, col = "orange", lwd = 2)
+abline(v = 1 / c(lambda1, lambda2), lty = 2, col = "blue", lwd = 2) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/mix2normal1.Rd b/man/mix2normal1.Rd
index 0dee335..0b73ea8 100644
--- a/man/mix2normal1.Rd
+++ b/man/mix2normal1.Rd
@@ -91,9 +91,10 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
\eqn{\phi \mu_1 + (1-\phi) \mu_2}{phi*mu1 + (1-phi)*mu2}
and this is returned as the fitted values.
By default, the five linear/additive predictors are
- \eqn{(logit(\phi), \mu_1, \log(\sigma_1), \mu_2, \log(\sigma_2))^T}{(logit(phi),
- mu1, log(sd1), mu2, log(sd2))^T}.
- If \code{equalsd = TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2} is enforced.
+ \eqn{(logit(\phi), \mu_1, \log(\sigma_1), \mu_2, \log(\sigma_2))^T}{
+ (logit(phi), mu1, log(sd1), mu2, log(sd2))^T}.
+ If \code{equalsd = TRUE} then \eqn{\sigma_1 = \sigma_2}{sd1=sd2}
+ is enforced.
}
diff --git a/man/morgenstern.Rd b/man/morgenstern.Rd
index 5ead3a9..3d63f78 100644
--- a/man/morgenstern.Rd
+++ b/man/morgenstern.Rd
@@ -81,8 +81,9 @@ morgenstern(lapar = "rhobit", earg = list(), iapar = NULL, tola0 = 0.01,
Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
-\emph{Extreme Value and Related Models with Applications in Engineering and Science},
-Hoboken, N.J.: Wiley-Interscience.
+\emph{Extreme Value and Related Models with Applications in
+ Engineering and Science},
+Hoboken, NJ, USA: Wiley-Interscience.
}
@@ -106,10 +107,11 @@ Hoboken, N.J.: Wiley-Interscience.
}
\examples{
-n = 1000; ymat = cbind(rexp(n), rexp(n))
+N = 1000; mdata = data.frame(y1 = rexp(N), y2 = rexp(N))
\dontrun{plot(mdata)}
-fit = vglm(ymat ~ 1, fam = morgenstern, trace = TRUE)
-fit = vglm(ymat ~ 1, fam = morgenstern, trace = TRUE, crit = "coef")
+fit = vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE)
+# This may fail:
+fit = vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index ceecff2..05d3804 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -45,32 +45,36 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
}
}
\details{
- In this help file the response \eqn{Y} is assumed to be a factor with
- unordered values \eqn{1,2,\dots,M+1}, so that \eqn{M} is the number
- of linear/additive predictors \eqn{\eta_j}{eta_j}.
+ In this help file the response \eqn{Y} is assumed to be
+ a factor with unordered values \eqn{1,2,\dots,M+1}, so
+ that \eqn{M} is the number of linear/additive predictors
+ \eqn{\eta_j}{eta_j}.
The default model can be written
\deqn{\eta_j = \log(P[Y=j]/ P[Y=M+1])}{%
eta_j = log(P[Y=j]/ P[Y=M+1])}
where \eqn{\eta_j}{eta_j} is the \eqn{j}th linear/additive predictor.
- Here, \eqn{j=1,\ldots,M}, and \eqn{\eta_{M+1}}{eta_{M+1}} is 0 by
- definition. That is, the last level of the factor, or last column of
- the response matrix, is taken as the reference level or baseline---this
- is for identifiability of the parameters.
- The reference or baseline level can be changed with the
- \code{refLevel} argument.
-
-
- In almost all the literature, the constraint matrices associated
- with this family of models are known. For example, setting
- \code{parallel = TRUE} will make all constraint matrices (except for
- the intercept) equal to a vector of \eqn{M} 1's. If the constraint
- matrices are unknown and to be estimated, then this can be achieved
- by fitting the model as a reduced-rank vector generalized linear model
- (RR-VGLM; see \code{\link{rrvglm}}). In particular, a multinomial logit
- model with unknown constraint matrices is known as a \emph{stereotype} model
- (Anderson, 1984), and can be fitted with \code{\link{rrvglm}}.
+ Here, \eqn{j=1,\ldots,M}, and \eqn{\eta_{M+1}}{eta_{M+1}}
+ is 0 by definition. That is, the last level of the factor,
+ or last column of the response matrix, is taken as the
+ reference level or baseline---this is for identifiability
+ of the parameters. The reference or baseline level can
+ be changed with the \code{refLevel} argument.
+
+
+ In almost all the literature, the constraint matrices
+ associated with this family of models are known. For
+ example, setting \code{parallel = TRUE} will make all
+ constraint matrices (except for the intercept) equal to
+ a vector of \eqn{M} 1's. If the constraint matrices are
+ unknown and to be estimated, then this can be achieved by
+ fitting the model as a reduced-rank vector generalized
+ linear model (RR-VGLM; see \code{\link{rrvglm}}).
+ In particular, a multinomial logit model with unknown
+ constraint matrices is known as a \emph{stereotype}
+ model (Anderson, 1984), and can be fitted with
+ \code{\link{rrvglm}}.
}
@@ -107,7 +111,8 @@ Agresti, A. (2002)
Hastie, T. J., Tibshirani, R. J. and Friedman, J. H. (2009)
-\emph{The Elements of Statistical Learning: Data Mining, Inference and Prediction},
+\emph{The Elements of Statistical Learning: Data Mining,
+ Inference and Prediction},
2nd ed.
New York: Springer-Verlag.
@@ -132,48 +137,54 @@ by the \pkg{VGAM} package can be found at
\author{ Thomas W. Yee }
\note{
- The response should be either a matrix of counts (with row sums that are
- all positive), or a factor. In both cases, the \code{y} slot returned
- by \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}} is the
- matrix of sample proportions.
+ The response should be either a matrix of counts
+ (with row sums that are all positive), or a
+ factor. In both cases, the \code{y} slot returned by
+ \code{\link{vglm}}/\code{\link{vgam}}/\code{\link{rrvglm}}
+ is the matrix of sample proportions.
The multinomial logit model is more appropriate for a nominal
(unordered) factor response than for an ordinal (ordered) factor
response.
- Models more suited for the latter include those based on cumulative
- probabilities, e.g., \code{\link{cumulative}}.
-
-
- \code{multinomial} is prone to numerical difficulties if the groups
- are separable and/or the fitted probabilities are close to 0 or 1.
- The fitted values returned are estimates of the probabilities
- \eqn{P[Y=j]} for \eqn{j=1,\ldots,M+1}.
- See \pkg{safeBinaryRegression} for the logistic regression case.
-
-
- Here is an example of the usage of the \code{parallel} argument.
- If there are covariates \code{x2}, \code{x3} and \code{x4}, then
- \code{parallel = TRUE ~ x2 + x3 - 1} and
- \code{parallel = FALSE ~ x4} are equivalent. This would constrain
- the regression coefficients for \code{x2} and \code{x3} to be equal;
- those of the intercepts and \code{x4} would be different.
-
-
- In Example 4 below, a conditional logit model is fitted to an artificial
- data set that explores how cost and travel time affect people's
- decision about how to travel to work. Walking is the baseline group.
- The variable \code{Cost.car} is the difference between the cost of
- travel to work by car and walking, etc. The variable \code{Time.car}
- is the difference between the travel duration/time to work by car and
- walking, etc. For other details about the \code{xij} argument see
+ Models more suited for the latter include those based on
+ cumulative probabilities, e.g., \code{\link{cumulative}}.
+
+
+ \code{multinomial} is prone to numerical difficulties if
+ the groups are separable and/or the fitted probabilities
+ are close to 0 or 1. The fitted values returned
+ are estimates of the probabilities \eqn{P[Y=j]} for
+ \eqn{j=1,\ldots,M+1}. See \pkg{safeBinaryRegression}
+ for the logistic regression case.
+
+
+ Here is an example of the usage of the \code{parallel}
+ argument. If there are covariates \code{x2}, \code{x3}
+ and \code{x4}, then \code{parallel = TRUE ~ x2 + x3 -
+ 1} and \code{parallel = FALSE ~ x4} are equivalent. This
+ would constrain the regression coefficients for \code{x2}
+ and \code{x3} to be equal; those of the intercepts and
+ \code{x4} would be different.
+
+
+ In Example 4 below, a conditional logit model is
+ fitted to an artificial data set that explores how
+ cost and travel time affect people's decision about
+ how to travel to work. Walking is the baseline group.
+ The variable \code{Cost.car} is the difference between
+ the cost of travel to work by car and walking, etc. The
+ variable \code{Time.car} is the difference between
+ the travel duration/time to work by car and walking,
+ etc. For other details about the \code{xij} argument see
\code{\link{vglm.control}} and \code{\link{fill}}.
- The \code{\link[nnet]{multinom}} function in the \pkg{nnet} package
- uses the first level of the factor as baseline, whereas the last
- level of the factor is used here. Consequently the estimated
- regression coefficients differ.
+ The \code{\link[nnet]{multinom}} function in the
+ \pkg{nnet} package uses the first level of the factor as
+ baseline, whereas the last level of the factor is used
+ here. Consequently the estimated regression coefficients
+ differ.
}
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
new file mode 100644
index 0000000..5512601
--- /dev/null
+++ b/man/nbcanlink.Rd
@@ -0,0 +1,174 @@
+\name{nbcanlink}
+\alias{nbcanlink}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Negative binomial canonical link function }
+\description{
+ Computes the negative binomial canonical link transformation,
+ including its inverse and the first two derivatives.
+
+}
+\usage{
+nbcanlink(theta, earg = list(), inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ Typically the mean of a negative binomial (NB) distribution.
+ See below for further details.
+
+
+ }
+ \item{earg}{
+ List.
+ Extra argument for passing in additional information.
+ Here, a \code{size} component contains the \eqn{k} matrix which
+ must be of a conformable dimension as \code{theta}.
+ Also, if \code{deriv > 0} then a \code{wrt.eta} component
+ which is either 1 or 2 (1 for with respect to the first
+ linear predictor, and 2 for with respect to the second
+ linear predictor (a function of \eqn{k})).
+
+
+ }
+ \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
+ \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
+ \item{short}{
+ Used for labelling the \code{blurb} slot of a
+ \code{\link{vglmff-class}} object.
+
+ }
+ \item{tag}{
+ Used for labelling the linear/additive predictor in the
+ \code{initialize} slot of a \code{\link{vglmff-class}} object.
+ Contains a little more information if \code{TRUE}.
+
+
+ }
+}
+\details{
+ The negative binomial (NB) canonical link is
+ \eqn{\log(\theta/ (\theta + k))}{log(theta/(theta + k))}
+ where \eqn{\theta}{theta} is the mean of a NB
+ distribution. The canonical link is used for theoretically
+ relating the NB to GLM class.
+
+
+ This link function was specifically written for
+ \code{\link{negbinomial}} and
+ \code{\link{negbinomial.size}},
+ and should not be used elsewhere
+ (these \pkg{VGAM} family functions have code that
+ specifically handles \code{nbcanlink()}).
+
+
+}
+\value{
+ For \code{deriv = 0}, the above equation
+ when \code{inverse = FALSE}, and if \code{inverse = TRUE} then
+ \code{kmatrix / expm1(-theta)}.
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+
+}
+\references{
+
+ Yee, T. W. (2012)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+
+ Hilbe, J. M. (2011)
+ \emph{Negative Binomial Regression},
+ 2nd Edition.
+ Cambridge: Cambridge University Press.
+
+
+}
+\author{ Thomas W. Yee }
+
+\section{Warning}{
+ This function currently does not work very well with \code{\link{negbinomial}}!
+ The NB-C model is sensitive to the initial values and may converge to a local solution.
+ Pages 210 and 309 of Hilbe (2011) note convergence difficulties (of
+ Newton-Raphson type algorithms), and this applies here.
+ This function should work okay with \code{\link{negbinomial.size}}.
+ Currently, trying something like \code{imethod = 3} or \code{imu},
+ \code{stepsize = 0.5}, \code{maxit = 100}, \code{zero = -2} should help;
+ see the example below.
+
+
+}
+\note{
+
+
+ While theoretically nice, this function is not recommended
+ in general since its value is always negative (linear predictors
+ ought to be unbounded in general). A \code{\link{loge}}
+ link for argument \code{lmu} is recommended instead.
+
+
+ Numerical instability may occur when \code{theta} is close to 0 or 1.
+ For the \code{earg} argument,
+ values of \code{theta} which are less than or equal to 0 can be
+ replaced by the \code{bvalue} component of the list \code{earg}
+ before computing the link function value.
+ The component name \code{bvalue} stands for ``boundary value''.
+ See \code{\link{Links}} for general information about \code{earg}.
+
+
+
+}
+
+\seealso{
+ \code{\link{negbinomial}},
+ \code{\link{negbinomial.size}}.
+
+
+}
+\examples{
+nbcanlink("mu", short = FALSE)
+
+mymu = 1:10 # Test some basic operations:
+kmatrix = matrix(runif(length(mymu)), length(mymu), 1)
+eta1 = nbcanlink(mymu, earg = list(size = kmatrix))
+ans2 = nbcanlink(eta1, earg = list(size = kmatrix), inverse = TRUE)
+max(abs(ans2 - mymu)) # Should be 0
+
+\dontrun{ mymu = c(seq(0.5, 10, length = 101))
+kmatrix = matrix(10, length(mymu), 1)
+plot(nbcanlink(mymu, earg = list(size = kmatrix)) ~ mymu, las = 1,
+ type = "l", col = "blue", lwd = 1.5, xlab = expression({mu})) }
+
+# Estimate the parameters from some simulated data (see Warning section)
+set.seed(123)
+ndata <- data.frame(x2 = runif(nn <- 1000 ))
+size1 = exp(1); size2 = exp(2)
+ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 < 0
+ size1 = size1,
+ size2 = size2)
+ndata <- transform(ndata,
+ mu1 = nbcanlink(eta1, earg = list(size = size1), inv = TRUE),
+ mu2 = nbcanlink(eta1, earg = list(size = size2), inv = TRUE))
+ndata <- transform(ndata, y1 = rnbinom(nn, mu = mu1, size = size1),
+ y2 = rnbinom(nn, mu = mu2, size = size2))
+head(ndata)
+summary(ndata)
+
+fit <- vglm(cbind(y1, y2) ~ x2, negbinomial("nbcanlink", imethod = 3),
+ stepsize = 0.5, ndata, # Deliberately slow the convergence rate
+ maxit = 100, trace = TRUE) # Warning: may converge to a local soln
+coef(fit, matrix = TRUE)
+summary(fit)
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+% abline(h = 0, col = "lightgray", lty = "dashed", lwd = 2.0)
+% The variance-covariance matrix may be wrong when the
+% canonical link is used.
+% vcov(fit) # May be wrong
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 54f79b7..290c5f0 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -29,25 +29,30 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
\code{\link{cumulative}} then one should choose
\code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
+
}
\item{inverse}{
Logical. If \code{TRUE} the inverse function is computed.
+
}
\item{deriv}{
Order of the derivative. Integer with value 0, 1 or 2.
+
}
\item{short}{
Used for labelling the \code{blurb} slot of a
\code{\link{vglmff-class}} object.
+
}
\item{tag}{
Used for labelling the linear/additive predictor in the
\code{initialize} slot of a \code{\link{vglmff-class}} object.
Contains a little more information if \code{TRUE}.
+
}
}
\details{
@@ -68,12 +73,12 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
}
\value{
- See Yee (2011) for details.
+ See Yee (2012) for details.
}
\references{
- Yee, T. W. (2011)
+ Yee, T. W. (2012)
\emph{Ordinal ordination with normalizing link functions for count data},
(in preparation).
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index dda0f1c..fface96 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -15,8 +15,8 @@ negbinomial(lmu = "loge", lsize = "loge", emu = list(), esize = list(),
Maxiter = 5000, deviance.arg = FALSE, imethod = 1,
parallel = FALSE, shrinkage.init = 0.95, zero = -2)
polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
- iprob = NULL, isize = NULL, quantile.probs = 0.75, nsimEIM = 100,
- deviance.arg = FALSE, imethod = 1, shrinkage.init = 0.95, zero = -2)
+ iprob = NULL, isize = NULL, quantile.probs = 0.75, nsimEIM = 100,
+ deviance.arg = FALSE, imethod = 1, shrinkage.init = 0.95, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -70,8 +70,9 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
Used to specify how many terms of the infinite series
for computing the second diagonal element of the
EIM are actually used.
- The sum of the probabilites are added until they reach this value or more
- (but no more than \code{Maxiter} terms allowed).
+ The probabilities are summed until the sum reaches
+ this value or more (but no more than \code{Maxiter}
+ terms allowed).
It is like specifying \code{p} in an imaginary function
\code{qnegbin(p)}.
@@ -85,13 +86,13 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
}
\item{deviance.arg}{
- Logical. If \code{TRUE}, the deviance function
- is attached to the object. Under ordinary circumstances, it should
- be left alone because it really assumes the index parameter is at
- the maximum likelihood estimate. Consequently, one cannot use that
- criterion to minimize within the IRLS algorithm.
- It should be set \code{TRUE} only when used with \code{\link{cqo}}
- under the fast algorithm.
+ Logical. If \code{TRUE}, the deviance function is attached
+ to the object. Under ordinary circumstances, it should be
+ left alone because it really assumes the index parameter
+ is at the maximum likelihood estimate. Consequently,
+ one cannot use that criterion to minimize within the
+ IRLS algorithm. It should be set \code{TRUE} only when
+ used with \code{\link{cqo}} under the fast algorithm.
}
\item{imethod}{
@@ -123,11 +124,12 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
}
\item{zero}{
Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if used
- at all. Specifies which of the two linear/additive predictors are
+ at all. Specifies which of the two linear/additive predictors are
modelled as an intercept only. By default, the \eqn{k} parameter
(after \code{lsize} is applied) is modelled as a single unknown
- number that is estimated. It can be modelled as a function of the
- explanatory variables by setting \code{zero = NULL}. A negative value
+ number that is estimated. It can be modelled as a function of the
+ explanatory variables by setting \code{zero = NULL}; this has been
+ called a NB-H model by Hilbe (2011). A negative value
means that the value is recycled, so setting \eqn{-2} means all \eqn{k}
are intercept-only.
See \code{\link{CommonVGAMffArguments}} for more information.
@@ -179,10 +181,10 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
The negative binomial distribution can be coerced into the
classical GLM framework with one of the parameters being
of interest and the other treated as a nuisance/scale
- parameter (this is implemented in the MASS library). The
+ parameter (this is implemented in the \pkg{MASS} library). The
\pkg{VGAM} family function \code{negbinomial} treats both
parameters on the same footing, and estimates them both
- by full maximum likelihood estimation. Simulated Fisher
+ by full maximum likelihood estimation. Simulated Fisher
scoring is employed as the default (see the \code{nsimEIM}
argument).
@@ -215,14 +217,15 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
estimate of the index parameter is fraught (see Lawless,
1987). In general, the \code{\link{quasipoissonff}} is
more robust. Other alternatives to \code{negbinomial} are
- to fit a NB-1 or RR-NB model; see Yee (2011). Assigning
- values to the \code{isize} argument may lead to a local
- solution, and smaller values are preferred over large
- values when using this argument.
+ to fit a NB-1 or RR-NB (aka NB-P) model; see Yee (2012).
+ Also available are the NB-C, NB-H and NB-G.
+ Assigning values to the \code{isize} argument may lead
+ to a local solution, and smaller values are preferred
+ over large values when using this argument.
- Yet to do: write a family function which uses the methods of moments
- estimator for \eqn{k}.
+ Yet to do: write a family function which uses the method
+ of moments estimator for \eqn{k}.
}
@@ -240,8 +243,9 @@ Negative binomial and mixed Poisson regression.
\bold{15}, 209--225.
-Hilbe, J. M. (2007)
-\emph{Negative Binomial Regression}.
+Hilbe, J. M. (2011)
+\emph{Negative Binomial Regression},
+2nd Edition.
Cambridge: Cambridge University Press.
@@ -251,7 +255,7 @@ Fitting the negative binomial distribution to biological data.
\bold{9}, 174--200.
- Yee, T. W. (2011)
+ Yee, T. W. (2012)
Two-parameter reduced-rank vector generalized linear models.
\emph{In preparation}.
@@ -338,7 +342,7 @@ Fitting the negative binomial distribution to biological data.
inclusive of quasi-Poisson and negative binomial regression.
This is known as a reduced-rank negative binomial model \emph{(RR-NB)}.
It fits a negative binomial log-linear regression with variance function
- \eqn{Var(Y) = \mu + \delta_1 \mu^{\delta_2}}{Var(Y) = mu + delta1 * mu^delta2}
+ \eqn{Var(Y)=\mu+\delta_1 \mu^{\delta_2}}{Var(Y) = mu + delta1 * mu^delta2}
where \eqn{\delta_1}{delta1}
and \eqn{\delta_2}{delta2}
are parameters to be estimated by MLE.
@@ -368,6 +372,8 @@ Fitting the negative binomial distribution to biological data.
\code{\link{quasipoissonff}},
\code{\link{poissonff}},
\code{\link{zinegbinomial}},
+ \code{\link{negbinomial.size}} (e.g., NB-G),
+ \code{\link{nbcanlink}} (NB-C),
\code{\link{posnegbinomial}},
\code{\link{invbinomial}},
% \code{\link[MASS]{rnegbin}}.
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
new file mode 100644
index 0000000..c2caea0
--- /dev/null
+++ b/man/negbinomial.size.Rd
@@ -0,0 +1,134 @@
+\name{negbinomial.size}
+\alias{negbinomial.size}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Negative Binomial Distribution Family Function With Known Size}
+\description{
+ Maximum likelihood estimation of the mean parameter of a negative
+ binomial distribution with known size parameter.
+
+}
+\usage{
+negbinomial.size(size = Inf, lmu = "loge", emu = list(), imu = NULL,
+ quantile.probs = 0.75, imethod = 1,
+ shrinkage.init = 0.95, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{size}{
+ Numeric, positive.
+ Same as argument \code{size} of \code{\link[stats:NegBinomial]{rnbinom}}.
+ If the response is a matrix then this is recycled to a matrix of
+ the same dimension, by row
+ (\code{\link[base]{matrix}} with \code{byrow = TRUE}).
+
+
+ }
+
+ \item{lmu, emu, imu}{
+ Same as \code{\link{negbinomial}}.
+
+
+ }
+ \item{quantile.probs}{
+ Same as \code{\link{negbinomial}}.
+
+
+ }
+ \item{imethod, zero}{
+ Same as \code{\link{negbinomial}}.
+
+
+ }
+ \item{shrinkage.init}{
+ Same as \code{\link{negbinomial}}.
+
+
+ }
+
+}
+\details{
+ This \pkg{VGAM} family function estimates only the mean parameter of
+ the negative binomial distribution.
+ See \code{\link{negbinomial}} for general information.
+ Setting \code{size = 1} gives what I call the NB-G (geometric model;
+ see Hilbe (2011)).
+ The default, \code{size = Inf}, corresponds to the Poisson distribution.
+
+
+}
+%\section{Warning}{
+%
+%}
+
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ \code{\link{rrvglm}}
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+Hilbe, J. M. (2011)
+\emph{Negative Binomial Regression},
+2nd Edition.
+Cambridge: Cambridge University Press.
+
+
+ Yee, T. W. (2012)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+
+
+}
+\author{ Thomas W. Yee }
+\note{
+ If \code{lmu = "nbcanlink"} in \code{negbinomial.size()} then
+ the \code{size} argument here is placed inside the \code{earg}
+ argument of \code{nbcanlink()} as a matrix with conformable size.
+
+
+}
+
+\seealso{
+ \code{\link{negbinomial}},
+ \code{\link{nbcanlink}} (NB-C model),
+ \code{\link{quasipoissonff}},
+ \code{\link{poissonff}},
+% \code{\link[MASS]{rnegbin}}.
+ \code{\link[stats:NegBinomial]{rnbinom}}.
+
+
+}
+\examples{
+# Simulated data with various multiple responses
+size1 = exp(1); size2 = exp(2); size3 = exp(0); size4 = Inf
+ndata <- data.frame(x2 = runif(nn <- 1000))
+ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 must be negative
+ size1 = size1)
+ndata <- transform(ndata,
+ mu1 = nbcanlink(eta1, earg = list(size = size1), inv = TRUE))
+ndata <- transform(ndata,
+ y1 = rnbinom(nn, mu = mu1, size = size1), # NB-C
+ y2 = rnbinom(nn, mu = exp(2 - x2), size = size2),
+ y3 = rnbinom(nn, mu = exp(3 + x2), size = size3), # NB-G
+ y4 = rpois (nn, la = exp(1 + x2)))
+
+# Also known as NB-C with size known (Hilbe, 2011)
+fit1 <- vglm(y1 ~ x2, negbinomial.size(size = size1, lmu = "nbcanlink"),
+ ndata, trace = TRUE, crit = "coef")
+coef(fit1, matrix = TRUE)
+head(fit1 at misc$size) # size saved here
+
+fit2 <- vglm(cbind(y2, y3, y4) ~ x2,
+ negbinomial.size(size = c(size2, size3, size4)),
+ ndata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+head(fit2 at misc$size) # size saved here
+}
+\keyword{models}
+\keyword{regression}
+
+
diff --git a/man/normal1.Rd b/man/normal1.Rd
index 7fd761c..d9057c7 100644
--- a/man/normal1.Rd
+++ b/man/normal1.Rd
@@ -8,26 +8,42 @@
}
\usage{
-normal1(lmean = "identity", lsd = "loge",
- emean = list(), esd = list(), imethod = 1, zero = -2)
+normal1(lmean = "identity", lsd = "loge", lvar = "loge",
+ emean = list(), esd = list(), evar = list(),
+ var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
+ intercept.apply = FALSE, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lmean, lsd}{
- Link functions applied to the mean and standard deviation.
+ \item{lmean, lsd, lvar}{
+ Link functions applied to the mean and standard deviation/variance.
See \code{\link{Links}} for more choices.
- Being a positive quantity, a log link is the default for the
- standard deviation.
+ Being positive quantities, a log link is the default for the
+ standard deviation and variance (see \code{var.arg}).
+
}
- \item{emean, esd}{
+ \item{emean, esd, evar}{
List. Extra argument for the links.
See \code{earg} in \code{\link{Links}} for general information.
+
}
- \item{imethod, zero}{
+ \item{var.arg}{
+ Logical.
+ If \code{TRUE} then the second parameter is the variance and
+ \code{lsd} and \code{esd} are ignored,
+ else the standard deviation is used
+ and \code{lvar} and \code{evar} are ignored.
+
+
+ }
+ \item{imethod, parallel, isd, intercept.apply, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
If \code{lmean = loge} then try \code{imethod = 2}.
+ Argument \code{intercept.apply} refers to whether the parallelism
+ constraint is applied to the intercept too.
+
}
@@ -62,7 +78,6 @@ normal1(lmean = "identity", lsd = "loge",
\note{
Yet to do: allow an argument such as \code{sameSD} that enables the
standard deviations to be the same.
- And a \code{parallel} argument.
}
@@ -85,9 +100,16 @@ normal1(lmean = "identity", lsd = "loge",
}
\examples{
ndata <- data.frame(x2 = rnorm(nn <- 200))
-ndata <- transform(ndata, y = rnorm(nn, mean = 1-3*x2, sd = exp(1+0.2*x2)))
-fit <- vglm(y ~ x2, normal1(zero = NULL), ndata, trace = TRUE)
-coef(fit, matrix = TRUE)
+ndata <- transform(ndata,
+ y1 = rnorm(nn, mean = 1-3*x2, sd = exp(1+0.2*x2)),
+ y2 = rnorm(nn, mean = 1+2*x2, sd = exp(1+ 2*x2)^0.5),
+ y3 = rnorm(nn, mean = 1+2*x2, sd = exp(1+ 2*x2)^0.5))
+fit1 <- vglm(y1 ~ x2, normal1(zero = NULL), ndata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+fit2 <- vglm(cbind(y2, y3) ~ x2, data = ndata, trace = TRUE,
+ normal1(var = TRUE, parallel = TRUE,
+ intercept.apply = TRUE, zero = NULL))
+coef(fit2, matrix = TRUE)
# Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
theta <- 10
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index ed93c06..8dbfe1e 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -3,6 +3,71 @@
%
%
%
+% 20120215
+% \alias{print.vglmff}
+\alias{show.vglmff}
+% \alias{print.vfamily}
+% \alias{show.Coef.rrar}
+% \alias{family.vglm}
+\alias{show.vgam}
+\alias{show.vglm}
+\alias{show.vlm}
+% \alias{print.vgam}
+% \alias{print.vglm}
+% \alias{print.vlm}
+% \alias{print.vlm.wfit}
+%
+%
+%
+%
+% 20120112
+\alias{AIC}
+\alias{coef}
+\alias{logLik}
+\alias{plot}
+\alias{show.summary.vglm}
+\alias{vcov}
+\alias{vcovvlm}
+\alias{VGAMenv}
+\alias{nobs}
+\alias{show.Coef.cao}
+\alias{show.Coef.qrrvglm}
+\alias{show.Coef.rrvglm}
+\alias{show.rrvglm}
+\alias{show.summary.cao}
+% \alias{show.summary.lms}
+\alias{show.summary.qrrvglm}
+% \alias{show.summary.rc.exponential}
+\alias{show.summary.rrvglm}
+\alias{show.summary.uqo}
+\alias{show.summary.vgam}
+\alias{show.summary.vglm}
+\alias{show.summary.vlm}
+\alias{show.uqo}
+\alias{show.vanova}
+\alias{show.vsmooth.spline}
+%
+%
+%
+%
+%
+%
+%
+%
+%
+% 20111224; lrtest and waldtest stuff
+%\alias{lrtest}
+%\alias{lrtest_vglm}
+%\alias{print_anova}
+\alias{update_default}
+\alias{update_formula}
+%
+%\alias{waldtest}
+%\alias{waldtest_vglm}
+%\alias{waldtest_default}
+%\alias{waldtest_formula}
+%
+%
%
%
% 20110202; 20110317; James Lauder work
@@ -25,6 +90,10 @@
\alias{fibre15}
%
%
+% 20120206; for RR-NB, or rrn.tex.
+\alias{plota21}
+%
+%
% 20110202; for Melbourne; these include datasets.
\alias{azprocedure}
\alias{confint_rrnb}
@@ -34,6 +103,12 @@
%
%
%
+%20111128; basics
+\alias{is.empty.list}
+%
+%
+%
+%
%20101222; Alfian work
%\alias{Rcam} % Has been written
%\alias{plotrcam0} % Has been written
@@ -183,7 +258,6 @@
\alias{eta2theta}
%\alias{explink}
% \alias{extract.arg}
-\alias{family.vglm}
%\alias{felix}
%\alias{dfelix}
\alias{fff.control}
@@ -311,28 +385,6 @@
\alias{predictvsmooth.spline.fit}
% \alias{preplotvgam}
\alias{print}
-% \alias{print.vanova}
-% \alias{print.vfamily}
-% \alias{print.vgam}
-% \alias{print.vglm}
-% \alias{print.vglmff}
-% \alias{print.vlm}
-% \alias{print.vlm.wfit}
-% \alias{printCoef.cao}
-% \alias{printCoef.qrrvglm}
-% \alias{printCoef.rrvglm}
-% \alias{printrrvglm}
-% \alias{printsummary.cao}
-% \alias{printsummary.lms}
-% \alias{printsummary.qrrvglm}
-% \alias{printsummary.rc.exponential}
-% \alias{printsummary.rrvglm}
-% \alias{printsummary.uqo}
-% \alias{printsummary.vgam}
-% \alias{printsummary.vglm}
-% \alias{printsummary.vlm}
-% \alias{printuqo}
-% \alias{printvsmooth.spline}
\alias{procVec}
\alias{negzero.expression}
\alias{process.binomial2.data.vgam}
diff --git a/man/olympic.Rd b/man/olympic.Rd
index c85c0ae..a4cfe41 100644
--- a/man/olympic.Rd
+++ b/man/olympic.Rd
@@ -47,7 +47,7 @@ summary(olympic)
## maybe str(olympic) ; plot(olympic) ...
\dontrun{ with(head(olympic, n = 8),
barplot(rbind(gold,silver,bronze),
- col = c("gold","grey","brown"), # No "silver" or "bronze"!
+ col = c("gold","grey","brown"), # No "silver" or "bronze"!
names.arg = country, cex.names = 0.5,
beside = TRUE, main = "2008 Summer Olympic Final Medal Count",
ylab = "Medal count", las = 1,
diff --git a/man/ordpoisson.Rd b/man/ordpoisson.Rd
index 4e70a9b..7ff3acf 100644
--- a/man/ordpoisson.Rd
+++ b/man/ordpoisson.Rd
@@ -97,7 +97,7 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
\references{
- Yee, T. W. (2011)
+ Yee, T. W. (2012)
\emph{Ordinal ordination with normalizing link functions for count data},
(in preparation).
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 55b0642..6e32e0b 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -7,24 +7,24 @@
paralogistic distribution.
}
\usage{
-paralogistic(link.a = "loge", link.scale = "loge", earg.a = list(),
- earg.scale = list(), init.a = 1, init.scale = NULL, zero = NULL)
+paralogistic(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
+ escale = list(), ishape1.a = 2, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale}{
+ \item{lshape1.a, lscale}{
Parameter link functions applied to the
(positive) shape parameter \code{a} and
(positive) scale parameter \code{scale}.
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale}{
+ \item{eshape1.a, escale}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale}{
+ \item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
}
@@ -53,7 +53,7 @@ and \eqn{a} is the shape parameter.
The mean is
\deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(a - 1/a) / \Gamma(a)}{%
E(Y) = b gamma(1 + 1/a) gamma(a - 1/a) / gamma(a)}
-provided \eqn{a > 1}.
+provided \eqn{a > 1}; these are returned as the fitted values.
}
@@ -68,7 +68,7 @@ provided \eqn{a > 1}.
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
}
@@ -95,10 +95,10 @@ default value is not \code{NULL}.
}
\examples{
-pdat = data.frame(y = rparalogistic(n = 3000, 4, 6))
-fit = vglm(y ~ 1, paralogistic, pdat, trace = TRUE)
-fit = vglm(y ~ 1, paralogistic(init.a = 2.3, init.sc = 5),
- pdat, trace = TRUE, crit = "c")
+pdata = data.frame(y = rparalogistic(n = 3000, 4, 6))
+fit = vglm(y ~ 1, paralogistic, pdata, trace = TRUE)
+fit = vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 5),
+ pdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/ParalogisticUC.Rd b/man/paralogisticUC.Rd
similarity index 78%
rename from man/ParalogisticUC.Rd
rename to man/paralogisticUC.Rd
index 341e8e4..03d41cb 100644
--- a/man/ParalogisticUC.Rd
+++ b/man/paralogisticUC.Rd
@@ -11,17 +11,17 @@
and scale parameter \code{scale}.
}
\usage{
-dparalogistic(x, a, scale=1, log=FALSE)
-pparalogistic(q, a, scale=1)
-qparalogistic(p, a, scale=1)
-rparalogistic(n, a, scale=1)
+dparalogistic(x, shape1.a, scale = 1, log = FALSE)
+pparalogistic(q, shape1.a, scale = 1)
+qparalogistic(p, shape1.a, scale = 1)
+rparalogistic(n, shape1.a, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{a}{shape parameter.}
+ \item{shape1.a}{shape parameter.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
@@ -35,30 +35,38 @@ rparalogistic(n, a, scale=1)
\code{pparalogistic} gives the distribution function,
\code{qparalogistic} gives the quantile function, and
\code{rparalogistic} generates random deviates.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
See \code{\link{paralogistic}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
+
}
\note{
The paralogistic distribution is a special case of the 4-parameter
generalized beta II distribution.
+
+
}
\seealso{
\code{\link{paralogistic}},
\code{\link{genbetaII}}.
+
}
\examples{
-y = rparalogistic(n=3000, 4, 6)
-fit = vglm(y ~ 1, paralogistic(init.a=2.1), trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+pdata = data.frame(y = rparalogistic(n = 3000, 4, 6))
+fit = vglm(y ~ 1, paralogistic(ishape1.a = 2.1), pdata, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{distribution}
diff --git a/man/pareto1.Rd b/man/pareto1.Rd
index b407b0b..a237227 100644
--- a/man/pareto1.Rd
+++ b/man/pareto1.Rd
@@ -147,19 +147,22 @@ tpareto1(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
}
\section{Warning }{
- The usual or unbounded Pareto distribution has two parameters
- (called \eqn{\alpha}{alpha} and \eqn{k} here) but the family
- function \code{pareto1} estimates only \eqn{k} using iteratively
- reweighted least squares. The MLE of the \eqn{\alpha}{alpha}
- parameter lies on the boundary and is \code{min(y)} where \code{y}
- is the response. Consequently, using the default argument values,
- the standard errors are incorrect when one does a \code{summary}
- on the fitted object. If the user inputs a value for \code{alpha}
- then it is assumed known with this value and then \code{summary} on
- the fitted object should be correct. Numerical problems may occur
- for small \eqn{k}, e.g., \eqn{k < 1}.
-
-}
+ The usual or unbounded Pareto distribution has two
+ parameters (called \eqn{\alpha}{alpha} and \eqn{k} here)
+ but the family function \code{pareto1} estimates only
+ \eqn{k} using iteratively reweighted least squares. The
+ MLE of the \eqn{\alpha}{alpha} parameter lies on the
+ boundary and is \code{min(y)} where \code{y} is the
+ response. Consequently, using the default argument
+ values, the standard errors are incorrect when one does a
+ \code{summary} on the fitted object. If the user inputs
+ a value for \code{alpha} then it is assumed known with
+ this value and then \code{summary} on the fitted object
+ should be correct. Numerical problems may occur for small
+ \eqn{k}, e.g., \eqn{k < 1}.
+
+
+}
\seealso{
\code{\link{Pareto}},
\code{\link{Tpareto}},
@@ -179,7 +182,7 @@ coef(fit, matrix = TRUE)
summary(fit) # Standard errors are incorrect!!
# Here, alpha is assumed known
-fit2 = vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE, crit = "c")
+fit2 = vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE, crit = "coef")
fit2@extra # alpha stored here
head(fitted(fit2))
coef(fit2, matrix = TRUE)
@@ -187,13 +190,15 @@ summary(fit2) # Standard errors are okay
# Upper truncated Pareto distribution
lower = 2; upper = 8; kay = exp(2)
-pdat3 = data.frame(y = rtpareto(n = 100, lower = lower, upper = upper, shape = kay))
-fit3 = vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE, cri = "c")
+pdat3 = data.frame(y = rtpareto(n = 100, lower = lower,
+ upper = upper, shape = kay))
+fit3 = vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE, cri = "coef")
coef(fit3, matrix = TRUE)
c(fit3@misc$lower, fit3@misc$upper)
}
\keyword{models}
\keyword{regression}
-% Package lmomco fits generalized pareto (three parameter) using method of L-moments.
+% Package lmomco fits generalized pareto (three parameter) using
+% method of L-moments.
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index 56cdcb3..4603b5c 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -70,21 +70,24 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
\eqn{s} the \emph{shape} parameter.
- The location parameter is assumed known otherwise the Pareto(IV)
- distribution will not be a regular family. This assumption is not too
- restrictive in modelling because in typical applications this parameter
- is known, e.g., in insurance and reinsurance it is pre-defined by a
- contract and can be represented as a deductible or a retention level.
+ The location parameter is assumed known otherwise the
+ Pareto(IV) distribution will not be a regular family.
+ This assumption is not too restrictive in modelling
+ because in typical applications this parameter is known,
+ e.g., in insurance and reinsurance it is pre-defined by
+ a contract and can be represented as a deductible or a
+ retention level.
- The inequality parameter is so-called because of its interpretation
- in the economics context. If we choose a unit shape parameter value
- and a zero location parameter value then the inequality parameter
- is the Gini index of inequality, provided \eqn{g \leq 1}{g<=1}.
+ The inequality parameter is so-called because of its
+ interpretation in the economics context. If we choose a
+ unit shape parameter value and a zero location parameter
+ value then the inequality parameter is the Gini index of
+ inequality, provided \eqn{g \leq 1}{g<=1}.
- The fitted values are currently \code{NA} because I haven't worked
- out what the mean of \eqn{Y} is yet.
+ The fitted values are currently \code{NA} because I
+ haven't worked out what the mean of \eqn{Y} is yet.
% The mean of \eqn{Y} is
@@ -147,13 +150,13 @@ Fairland, Maryland: International Cooperative Publishing House.
%\section{Warning }{
% The Pareto(IV) distribution is very general,
% for example, special cases include the Pareto(I), Pareto(II),
-% Pareto(III), and Burr family of distributions. Consequently, reasonably
+% Pareto(III), and Burr family of distributions. Consequently, reasonably
% good initial values are recommended, and convergence to a local solution
% may occur. For this reason setting \code{trace=TRUE} is a good idea
% for monitoring the convergence.
% Large samples are ideally required to get reasonable results.
%
-%}
+%}
\seealso{
\code{\link{ParetoIV}},
\code{\link{pareto1}},
@@ -162,9 +165,10 @@ Fairland, Maryland: International Cooperative Publishing House.
}
\examples{
-pdat = data.frame(y = rparetoIV(2000, scal = exp(1), ineq = exp(-0.3), shap = exp(1)))
-\dontrun{par(mfrow = c(2,1)); with(pdat, hist(y)); with(pdat, hist(log(y))) }
-fit = vglm(y ~ 1, paretoIV, pdat, trace = TRUE)
+pdata = data.frame(y = rparetoIV(2000, scal = exp(1),
+ ineq = exp(-0.3), shape = exp(1)))
+\dontrun{par(mfrow = c(2,1)); with(pdata, hist(y)); with(pdata, hist(log(y))) }
+fit = vglm(y ~ 1, paretoIV, pdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/ParetoIVUC.Rd b/man/paretoIVUC.Rd
similarity index 100%
rename from man/ParetoIVUC.Rd
rename to man/paretoIVUC.Rd
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
index 52dfbac..9aec090 100644
--- a/man/plotdeplot.lmscreg.Rd
+++ b/man/plotdeplot.lmscreg.Rd
@@ -104,7 +104,7 @@ contains further information and examples.
}
\examples{
-fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bminz)
+fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bmi.nz)
\dontrun{ y = seq(15, 43, by = 0.25)
deplot(fit, x0 = 20, y = y, xlab = "BMI", col = "green", llwd = 2,
main = "BMI distribution at ages 20 (green), 40 (blue), 60 (orange)")
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
index 2ad0c76..e0fddbe 100644
--- a/man/plotqtplot.lmscreg.Rd
+++ b/man/plotqtplot.lmscreg.Rd
@@ -98,7 +98,7 @@ contains further information and examples.
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bminz)
+fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bmi.nz)
qtplot(fit)
qtplot(fit, perc = c(25,50,75,95), lcol = "blue", tcol = "blue", llwd = 2)
}
diff --git a/man/plotvgam.Rd b/man/plotvgam.Rd
index 64f89c4..aa9a397 100644
--- a/man/plotvgam.Rd
+++ b/man/plotvgam.Rd
@@ -11,7 +11,7 @@
plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
rugplot = TRUE, se = FALSE, scale = 0, raw = TRUE,
offset.arg = 0, deriv.arg = 0, overlay = FALSE,
- type.residuals = c("deviance","working","pearson","response"),
+ type.residuals = c("deviance", "working", "pearson", "response"),
plot.arg = TRUE, which.term = NULL, which.cf = NULL,
control = plotvgam.control(...), varxij = 1, ...)
}
@@ -19,7 +19,8 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
\arguments{
\item{x}{ A fitted \pkg{VGAM} object, e.g., produced by
- \code{vgam()}, \code{vglm()}, or \code{rrvglm()}. }
+ \code{\link{vgam}}, \code{\link{vglm}}, or \code{\link{rrvglm}}.
+ }
\item{newdata}{ Data frame.
May be used to reconstruct the original data set. }
\item{y}{ Unused. }
@@ -49,51 +50,68 @@ plotvgam(x, newdata = NULL, y = NULL, residuals = NULL,
The \code{raw} argument is directly fed into \code{predict.vgam()}.
}
- \item{offset.arg}{ Numerical vector of length \eqn{r}.
+ \item{offset.arg}{
+ Numerical vector of length \eqn{r}.
These are added to the component functions. Useful for
separating out the functions when \code{overlay} is \code{TRUE}.
If \code{overlay} is \code{TRUE} and there is one covariate then
using the intercept values as the offsets can be a good idea.
+
}
- \item{deriv.arg}{ Numerical. The order of the derivative.
+ \item{deriv.arg}{
+ Numerical. The order of the derivative.
Should be assigned an small
integer such as 0, 1, 2. Only applying to \code{s()} terms,
it plots the derivative.
+
}
- \item{overlay}{ Logical. If \code{TRUE} then component functions of the same
+ \item{overlay}{
+ Logical. If \code{TRUE} then component functions of the same
covariate are overlaid on each other.
The functions are centered, so \code{offset.arg} can be useful
when \code{overlay} is \code{TRUE}.
+
}
- \item{type.residuals}{ if \code{residuals} is \code{TRUE} then the first
+ \item{type.residuals}{
+ if \code{residuals} is \code{TRUE} then the first
possible value
of this vector, is used to specify the type of
- residual. }
- \item{plot.arg}{ Logical. If \code{FALSE} then no plot is produced. }
- \item{which.term}{ Character or integer vector containing all
- terms to be
- plotted, e.g., \code{which.term=c("s(age)", "s(height"))} or
- \code{which.term=c(2,5,9)}.
- By default, all are plotted. }
+ residual.
+ }
+
+ \item{plot.arg}{
+ Logical. If \code{FALSE} then no plot is produced. }
+ \item{which.term}{
+ Character or integer vector containing all terms to be
+  plotted, e.g., \code{which.term = c("s(age)", "s(height)")} or
+ \code{which.term = c(2, 5, 9)}.
+ By default, all are plotted.
+
+ }
\item{which.cf}{ An integer-valued vector specifying which
linear/additive predictors are to be plotted.
The values must be from the set \{1,2,\ldots,\eqn{r}\}.
By default, all are plotted.
+
}
- \item{control}{ Other control parameters. See
- \code{\link{plotvgam.control}}. }
- \item{\dots}{ Other arguments that can be fed into
- \code{\link{plotvgam.control}}. This includes line colors,
- line widths, line types, etc.
+ \item{control}{
+ Other control parameters. See \code{\link{plotvgam.control}}.
+
+ }
+ \item{\dots}{
+ Other arguments that can be fed into
+ \code{\link{plotvgam.control}}. This includes line colors,
+ line widths, line types, etc.
+
}
\item{varxij}{ Positive integer.
Used if \code{xij} of \code{\link{vglm.control}} was used,
this chooses which inner argument the component is plotted against.
- This argument is related to \code{raw=TRUE} and terms such as
+ This argument is related to \code{raw = TRUE} and terms such as
\code{NS(dum1,dum2)} and constraint matrices that have more than
one column. The default would plot the smooth against \code{dum1}
- but setting \code{varxij=2} could mean plotting the smooth against
+ but setting \code{varxij = 2} could mean plotting the smooth against
\code{dum2}.
See the \pkg{VGAM} website for further information.
@@ -159,11 +177,14 @@ contains further information and examples.
}
\examples{
coalminers = transform(coalminers, Age = (age - 42) / 5)
-fit = vgam(cbind(nBnW,nBW,BnW,BW) ~ s(Age), binom2.or(zero = NULL), coalminers)
+fit = vgam(cbind(nBnW, nBW, BnW, BW) ~ s(Age),
+ binom2.or(zero = NULL), coalminers)
\dontrun{ par(mfrow = c(1,3))
-plot(fit, se = TRUE, ylim = c(-3,2), las = 1)
-plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange", ylim = c(-3,2))
-plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange", overlay = TRUE) }
+plot(fit, se = TRUE, ylim = c(-3, 2), las = 1)
+plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange",
+ ylim = c(-3, 2))
+plot(fit, se = TRUE, which.cf = 1:2, lcol = "blue", scol = "orange",
+ overlay = TRUE) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/plotvgam.control.Rd b/man/plotvgam.control.Rd
index 09646cc..1773e34 100644
--- a/man/plotvgam.control.Rd
+++ b/man/plotvgam.control.Rd
@@ -44,8 +44,12 @@ plotvgam.control(which.cf = NULL,
Fed into \code{par(lwd)}. }
\item{slwd}{ Line width of the standard error bands.
Fed into \code{par(lwd)}. }
- \item{add.arg}{ Logical. If \code{TRUE} then the plot will be added
- to an existing plot, otherwise a new plot will be made. }
+ \item{add.arg}{ Logical.
+ If \code{TRUE} then the plot will be added to an existing
+ plot, otherwise a new plot will be made.
+
+ }
+
\item{one.at.a.time}{ Logical. If \code{TRUE} then the plots are done
one at a time, with the user having to hit the return key
between the plots. }
diff --git a/man/polf.Rd b/man/polf.Rd
index 86b74bd..7de881d 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -24,7 +24,7 @@ polf(theta, earg = stop("argument 'earg' must be given"),
The cutpoints should be non-negative integers.
If \code{polf()} is used as the link function in
\code{\link{cumulative}} then one should choose
- \code{reverse=TRUE, parallel=TRUE, intercept.apply=TRUE}.
+ \code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
}
\item{inverse}{
@@ -54,22 +54,27 @@ polf(theta, earg = stop("argument 'earg' must be given"),
an ordinal response coming from an underlying Poisson distribution.
If the cutpoint is zero then a complementary log-log link is used.
+
The arguments \code{short} and \code{tag} are used only if
\code{theta} is character.
+
See \code{\link{Links}} for general information about \pkg{VGAM}
link functions.
+
}
\value{
- See Yee (2007) for details.
+ See Yee (2012) for details.
+
}
\references{
- Yee, T. W. (2007)
+ Yee, T. W. (2012)
\emph{Ordinal ordination with normalizing link functions for count data},
(in preparation).
+
}
\author{ Thomas W. Yee }
@@ -81,16 +86,19 @@ polf(theta, earg = stop("argument 'earg' must be given"),
\code{theta} is too close to 1 or 0,
numerical instabilities may still arise.
+
In terms of the threshold approach with cumulative probabilities for
an ordinal response this link function corresponds to the
Poisson distribution (see \code{\link{poissonff}}) that has been
recorded as an ordinal response using known cutpoints.
+
}
\section{Warning }{
Prediction may not work on \code{\link{vglm}} or
\code{\link{vgam}} etc. objects if this link function is used.
+
}
\seealso{
@@ -100,24 +108,26 @@ polf(theta, earg = stop("argument 'earg' must be given"),
\code{\link{nbolf}},
\code{\link{golf}},
\code{\link{cumulative}}.
+
+
}
\examples{
-earg = list(cutpoint=2)
-polf("p", earg=earg, short=FALSE)
-polf("p", earg=earg, tag=TRUE)
+earg = list(cutpoint = 2)
+polf("p", earg = earg, short = FALSE)
+polf("p", earg = earg, tag = TRUE)
p = seq(0.01, 0.99, by=0.01)
-y = polf(p, earg=earg)
-y. = polf(p, earg=earg, deriv=1)
-max(abs(polf(y, earg=earg, inv=TRUE) - p)) # Should be 0
+y = polf(p, earg = earg)
+y. = polf(p, earg = earg, deriv = 1)
+max(abs(polf(y, earg = earg, inv = TRUE) - p)) # Should be 0
\dontrun{
-par(mfrow=c(2,1), las=1)
-plot(p, y, type="l", col="blue", main="polf()")
-abline(h=0, v=0.5, col="red", lty="dashed")
+par(mfrow=c(2,1), las = 1)
+plot(p, y, type = "l", col = "blue", main = "polf()")
+abline(h=0, v=0.5, col = "red", lty = "dashed")
-plot(p, y., type="l", col="blue",
- main="(Reciprocal of) first POLF derivative") }
+plot(p, y., type = "l", col = "blue",
+ main = "(Reciprocal of) first POLF derivative") }
# Rutherford and Geiger data
@@ -128,8 +138,8 @@ with(ruge, length(yy)) # 2608 1/8-minute intervals
cutpoint = 5
ruge = transform(ruge, yy01 = ifelse(yy <= cutpoint, 0, 1))
earg = list(cutpoint=cutpoint)
-fit = vglm(yy01 ~ 1, binomialff(link="polf", earg=earg), ruge)
-coef(fit, matrix=TRUE)
+fit = vglm(yy01 ~ 1, binomialff(link = "polf", earg = earg), ruge)
+coef(fit, matrix = TRUE)
exp(coef(fit))
@@ -143,15 +153,15 @@ pdat = transform(pdat, cuty = Cut(y1, breaks=cutpoints))
\dontrun{
with(pdat, plot(x2, x3, col=cuty, pch=as.character(cuty))) }
with(pdat, table(cuty) / sum(table(cuty)))
-fit = vglm(cuty ~ x2 + x3, fam = cumulative(link="polf",
- reverse=TRUE, parallel=TRUE, intercept.apply=TRUE,
- mv=TRUE, earg=list(cutpoint=cutpoints[2:3])),
- pdat, trace=TRUE)
+fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "polf",
+ reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
+ mv = TRUE, earg = list(cutpoint=cutpoints[2:3])),
+ pdat, trace = TRUE)
head(fit@y)
head(fitted(fit))
head(predict(fit))
coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
constraints(fit)
fit@misc$earg
}
diff --git a/man/posgeomUC.Rd b/man/posgeomUC.Rd
new file mode 100644
index 0000000..bd4699f
--- /dev/null
+++ b/man/posgeomUC.Rd
@@ -0,0 +1,107 @@
+\name{Posgeom}
+\alias{Posgeom}
+\alias{dposgeom}
+\alias{pposgeom}
+\alias{qposgeom}
+\alias{rposgeom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Positive-geometric Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the positive-geometric distribution.
+
+}
+\usage{
+dposgeom(x, prob, log = FALSE)
+pposgeom(q, prob)
+qposgeom(p, prob)
+rposgeom(n, prob)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+
+ }
+ \item{prob}{
+ vector of probabilities of success (of an ordinary geometric distribution).
+ Short vectors are recycled.
+
+ }
+ \item{log}{
+ logical.
+
+ }
+}
+\details{
+ The positive-geometric distribution is a geometric distribution but with
+ the probability of a zero being zero. The other probabilities are scaled
+ to add to unity.
+ The mean therefore is \eqn{1/prob}{1/prob}.
+
+ As \eqn{prob}{prob} decreases, the positive-geometric and geometric
+ distributions become more similar.
+ Like similar functions for the geometric distribution, a zero value
+ of \code{prob} is not permitted here.
+
+
+}
+\value{
+ \code{dposgeom} gives the density,
+ \code{pposgeom} gives the distribution function,
+ \code{qposgeom} gives the quantile function, and
+ \code{rposgeom} generates random deviates.
+
+
+}
+%\references{
+%None.
+%}
+
+\author{ T. W. Yee }
+\note{
+ For \code{rposgeom()}, the arguments of the function are fed
+ into \code{\link[stats:Geometric]{rgeom}} until \eqn{n} positive
+ values are obtained. This may take a long time if \code{prob}
+ has values close to 1.
+
+
+% The family function \code{posgeometric} needs not be written.
+% If it were, then it would estimate
+% \eqn{prob}{prob} by maximum likelihood estimation.
+
+
+}
+
+\seealso{
+% \code{posgeometric},
+ \code{\link{zageometric}},
+ \code{\link[stats:Geometric]{rgeom}}.
+
+
+}
+\examples{
+prob <- 0.75; y = rposgeom(n = 1000, prob)
+table(y)
+mean(y) # Sample mean
+1/prob # Population mean
+
+(ii <- dposgeom(0:7, prob))
+cumsum(ii) - pposgeom(0:7, prob) # Should be 0s
+table(rposgeom(100, prob))
+
+table(qposgeom(runif(1000), prob))
+round(dposgeom(1:10, prob) * 1000) # Should be similar
+
+\dontrun{
+x <- 0:5
+barplot(rbind(dposgeom(x, prob), dgeom(x, prob)),
+ beside = TRUE, col = c("blue", "orange"),
+ main = paste("Positive geometric(", prob, ") (blue) vs",
+ " geometric(", prob, ") (orange)", sep = ""),
+ names.arg = as.character(x), las = 1, lwd = 2) }
+}
+\keyword{distribution}
+
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index cc915e2..4a8de87 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -9,8 +9,9 @@
}
\usage{
posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
- isize = NULL, zero = -2, cutoff = 0.995, shrinkage.init = 0.95,
- imethod = 1)
+ isize = NULL, zero = -2, nsimEIM = 250,
+ shrinkage.init = 0.95, imethod = 1)
+
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -42,25 +43,9 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
computed internally using a range of values.
}
- \item{zero}{
- Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if used
- at all. Specifies which of the two linear/additive predictors are
- modelled as an intercept only. By default, the \code{k} parameter
- (after \code{lsize} is applied) is modelled as a single unknown
- number that is estimated. It can be modelled as a function of
- the explanatory variables by setting \code{zero = NULL}. A negative
- value means that the value is recycled, so setting \eqn{-2} means
- all \code{k} are intercept only.
- See \code{\link{CommonVGAMffArguments}} for more information.
-
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}}.
- }
- \item{cutoff}{
- A numeric which is close to 1 but never exactly 1. Used to
- specify how many terms of the infinite series are actually used.
- The sum of the probabilites are added until they reach this value
- or more. It is like specifying \code{p} in an imaginary function
- \code{qnegbin(p)}.
}
\item{shrinkage.init, imethod}{
@@ -111,18 +96,18 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
}
\references{
-Barry, S. C. and Welsh, A. H. (2002)
-Generalized additive modelling and zero inflated count data.
-\emph{Ecological Modelling},
-\bold{157},
-179--188.
+ Barry, S. C. and Welsh, A. H. (2002)
+ Generalized additive modelling and zero inflated count data.
+ \emph{Ecological Modelling},
+ \bold{157},
+ 179--188.
-Williamson, E. and Bretherton, M. H. (1964)
-Tables of the logarithmic series distribution.
-\emph{Annals of Mathematical Statistics},
-\bold{35},
-284--297.
+ Williamson, E. and Bretherton, M. H. (1964)
+ Tables of the logarithmic series distribution.
+ \emph{Annals of Mathematical Statistics},
+ \bold{35},
+ 284--297.
}
@@ -144,22 +129,22 @@ Tables of the logarithmic series distribution.
}
\examples{
-pndat <- data.frame(x = runif(nn <- 2000))
-pndat <- transform(pndat, y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
+pdata <- data.frame(x = runif(nn <- 1000))
+pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
y2 = rposnegbin(nn, munb = exp(1+2*x), size = exp(3)))
-fit <- vglm(cbind(y1, y2) ~ x, posnegbinomial, pndat, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x, posnegbinomial, pdata, trace = TRUE)
coef(fit, matrix = TRUE)
-dim(fit@y)
+dim(depvar(fit)) # dim(fit@y) is not as good
# Another artificial data example
-pndat2 <- data.frame(munb = exp(2), size = exp(3)); nn <- 1000
-pndat2 <- transform(pndat2, y = rposnegbin(nn, munb = munb, size = size))
-with(pndat2, table(y))
-fit <- vglm(y ~ 1, posnegbinomial, pndat2, trace = TRUE)
+pdata2 <- data.frame(munb = exp(2), size = exp(3)); nn <- 1000
+pdata2 <- transform(pdata2, y3 = rposnegbin(nn, munb = munb, size = size))
+with(pdata2, table(y3))
+fit <- vglm(y3 ~ 1, posnegbinomial, pdata2, trace = TRUE)
coef(fit, matrix = TRUE)
-with(pndat2, mean(y)) # Sample mean
-head(with(pndat2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
+with(pdata2, mean(y3)) # Sample mean
+head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
head(fitted(fit), 3)
head(predict(fit), 3)
@@ -174,6 +159,10 @@ Coef(fit)
(khat <- Coef(fit)["size"])
pdf2 <- dposnegbin(x = with(corbet, nindiv), mu = fitted(fit), size = khat)
print( with(corbet, cbind(nindiv, ofreq, fitted = pdf2*sum(ofreq))), dig = 1)
+\dontrun{ with(corbet,
+matplot(nindiv, cbind(ofreq, fitted = pdf2*sum(ofreq)), las = 1,
+ type = "b", ylab = "Frequency", col = c("blue", "orange"),
+ main = "blue 1s = observe; orange 2s = fitted")) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/pospoisUC.Rd b/man/pospoisUC.Rd
index 3c2265d..ae5d1bb 100644
--- a/man/pospoisUC.Rd
+++ b/man/pospoisUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dpospois(x, lambda, log=FALSE)
+dpospois(x, lambda, log = FALSE)
ppospois(q, lambda)
qpospois(p, lambda)
rpospois(n, lambda)
@@ -86,7 +86,7 @@ rpospois(n, lambda)
lambda <- 2; y = rpospois(n = 1000, lambda)
table(y)
mean(y) # Sample mean
-lambda / (1-exp(-lambda)) # Population mean
+lambda / (1 - exp(-lambda)) # Population mean
(ii <- dpospois(0:7, lambda))
cumsum(ii) - ppospois(0:7, lambda) # Should be 0s
@@ -95,8 +95,7 @@ table(rpospois(100, lambda))
table(qpospois(runif(1000), lambda))
round(dpospois(1:10, lambda) * 1000) # Should be similar
-\dontrun{
-x <- 0:7
+\dontrun{ x <- 0:7
barplot(rbind(dpospois(x, lambda), dpois(x, lambda)),
beside = TRUE, col = c("blue", "orange"),
main = paste("Positive Poisson(", lambda, ") (blue) vs",
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index ff6fcac..83748d4 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -11,32 +11,19 @@ pospoisson(link = "loge", earg = list(),
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link}{
- Link function for the usual mean (lambda) parameter of
+ \item{link, earg}{
+ Link function and extra argument for the usual mean (lambda) parameter of
an ordinary Poisson distribution.
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{expected}{
Logical.
Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
}
- \item{ilambda}{
- Optional initial value for \eqn{\lambda}{lambda}.
- A \code{NULL} means a value is computed internally.
-
- }
- \item{imethod}{
- An integer with value \code{1} or \code{2} or \code{3} which
- specifies the initialization method for \eqn{\lambda}{lambda}.
- If failure to converge occurs try another value
- and/or else specify a value for \code{ilambda}.
+ \item{ilambda, imethod}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -49,10 +36,12 @@ pospoisson(link = "loge", earg = list(),
can be obtained by the extractor function \code{fitted} applied to
the object.
+
A related distribution is the zero-inflated Poisson, in which the
probability \eqn{P[Y=0]} involves another parameter \eqn{\phi}{phi}.
See \code{\link{zipoisson}}.
+
}
\section{Warning }{
Under- or over-flow may occur if the data is ill-conditioned.
@@ -77,33 +66,33 @@ contains further information and examples.
}
\author{ Thomas W. Yee }
\note{
+ This family function can handle a multivariate response.
+
Yet to be done: a \code{quasi.pospoisson} which estimates a dispersion
parameter.
- This family function can handle a multivariate response.
}
\seealso{
-\code{\link{Pospois}},
-\code{\link{posnegbinomial}},
-\code{\link{poissonff}},
-\code{\link{zipoisson}}.
+ \code{\link{Pospois}},
+ \code{\link{posnegbinomial}},
+ \code{\link{poissonff}},
+ \code{\link{zipoisson}}.
}
\examples{
# Data from Coleman and James (1961)
-cjdat = data.frame(y = 1:6, w = c(1486, 694, 195, 37, 10, 1))
-fit = vglm(y ~ 1, pospoisson, cjdat, weights=w)
+cjdat = data.frame(y = 1:6, freq = c(1486, 694, 195, 37, 10, 1))
+fit = vglm(y ~ 1, pospoisson, cjdat, weights = freq)
Coef(fit)
summary(fit)
fitted(fit)
-# Artificial data
-pdat = data.frame(x = runif(nn <- 1000))
-pdat = transform(pdat, lambda = exp(1 - 2*x))
-pdat = transform(pdat, y = rpospois(nn, lambda))
-with(pdat, table(y))
-fit = vglm(y ~ x, pospoisson, pdat, trace=TRUE, crit="c")
+pdat = data.frame(x2 = runif(nn <- 1000)) # Artificial data
+pdat = transform(pdat, lambda = exp(1 - 2 * x2))
+pdat = transform(pdat, y1 = rpospois(nn, lambda))
+with(pdat, table(y1))
+fit = vglm(y1 ~ x2, pospoisson, pdat, trace = TRUE, crit = "coef")
coef(fit, matrix=TRUE)
}
\keyword{models}
diff --git a/man/propodds.Rd b/man/propodds.Rd
index f7af884..be31bca 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -80,7 +80,7 @@ contains further information and examples.
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
pneumo = transform(pneumo, let = log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-fit@y # Sample proportions
+depvar(fit) # Sample proportions
weights(fit, type = "prior") # Number of observations
coef(fit, matrix = TRUE)
constraints(fit) # Constraint matrices
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index 890c71c..da9f5fe 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -21,7 +21,7 @@ qrrvglm.control(Rank = 1,
FastAlgorithm = TRUE,
GradientFunction = TRUE,
Hstep = 0.001,
- isdlv = rep(c(2, 1, rep(0.5, len=Rank)), len=Rank),
+ isdlv = rep(c(2, 1, rep(0.5, length = Rank)), length = Rank),
iKvector = 0.1,
iShape = 0.1,
ITolerances = FALSE,
@@ -478,18 +478,25 @@ p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
ITolerances = TRUE, isdlv = isdlv, # Note the use of isdlv here
fam = quasipoissonff, data = hspider)
sort(p1@misc$deviance.Bestof) # A history of all the iterations
-
-# Negative binomial CQO; smallest deviance is about 275.389
-set.seed(1234) # This leads to a reasonable (but not the global) solution?
-nb1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
- Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
- fam = negbinomial, data = hspider)
-sort(nb1@misc$deviance.Bestof) # A history of all the iterations
-summary(nb1)
-\dontrun{ lvplot(nb1, lcol=1:12, y = TRUE, pcol=1:12) }
}
\keyword{models}
\keyword{regression}
+
+
+%\dontrun{
+%# 20120221; withdrawn for a while coz it creates a lot of error messages.
+%# Negative binomial CQO; smallest deviance is about 275.389
+%set.seed(1234) # This leads to a reasonable (but not the global) solution?
+%nb1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+% Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+% WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+% ITol = FALSE, EqualTol = TRUE, # A good idea for negbinomial
+% fam = negbinomial, data = hspider)
+%sort(nb1@misc$deviance.Bestof) # A history of all the iterations
+%summary(nb1)
+%}
+%\dontrun{ lvplot(nb1, lcol=1:12, y = TRUE, pcol=1:12) }
+
+
+
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 6379100..313fed4 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -69,7 +69,7 @@ contains further information and examples.
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bminz)
+fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
qtplot(fit)
qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
}
diff --git a/man/RayleighUC.Rd b/man/rayleighUC.Rd
similarity index 100%
rename from man/RayleighUC.Rd
rename to man/rayleighUC.Rd
diff --git a/man/rrar.Rd b/man/rrar.Rd
index 45722a1..03c1753 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -14,17 +14,20 @@ rrar(Ranks = 1, coefstart = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{Ranks}{ Vector of integers: the ranks of the model.
+ \item{Ranks}{
+ Vector of integers: the ranks of the model.
Each value must be at least one and no more than \code{M},
where \code{M} is the number of response variables in the time series.
The length of \code{Ranks} is the \emph{lag}, which is often denoted by
the symbol \emph{L} in the literature.
+
}
- \item{coefstart}{ Optional numerical vector of initial values for the
- coefficients.
+ \item{coefstart}{
+ Optional numerical vector of initial values for the coefficients.
By default, the family function chooses these automatically.
+
}
}
\details{
@@ -58,42 +61,41 @@ time series.
}
\author{ T. W. Yee }
\note{
-This family function should
-be used within \code{\link{vglm}} and
-not with \code{\link{rrvglm}} because
-it does not fit into the RR-VGLM framework exactly. Instead, the
-reduced-rank model is formulated as a VGLM!
+ This family function should be used within \code{\link{vglm}}
+ and not with \code{\link{rrvglm}} because it does not fit into
+ the RR-VGLM framework exactly. Instead, the reduced-rank model
+ is formulated as a VGLM!
-A methods function \code{Coef.rrar}, say, has yet to be written.
-It would return the quantities
-\code{Ak1},
-\code{C},
-\code{D},
-\code{omegahat},
-\code{Phi},
-etc. as slots, and then \code{print.Coef.rrar} would also need to be
-written.
+ A methods function \code{Coef.rrar}, say, has yet to be written.
+ It would return the quantities
+ \code{Ak1},
+ \code{C},
+ \code{D},
+ \code{omegahat},
+ \code{Phi},
+ etc. as slots, and then \code{show.Coef.rrar} would also need to be
+ written.
}
\seealso{
\code{\link{vglm}},
- \code{\link{usgrain}}.
+ \code{\link{grain.us}}.
+
}
\examples{
-\dontrun{
year = seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
-par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
+\dontrun{ par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
for(ii in 1:4) {
- plot(year, usgrain[, ii], main = names(usgrain)[ii],
- type = "l", xlab = "", ylab = "")
- points(year, usgrain[,ii], pch = "*")
-}
-apply(usgrain, 2, mean) # mu vector
-cgrain = scale(usgrain, scale = FALSE) # Center the time series only
+ plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
+ type = "l", xlab = "", ylab = "", col = "blue")
+ points(year, grain.us[,ii], pch = "*", col = "blue")
+}}
+apply(grain.us, 2, mean) # mu vector
+cgrain = scale(grain.us, scale = FALSE) # Center the time series only
fit = vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
summary(fit)
@@ -103,13 +105,12 @@ print(fit@misc$Dmatrices, dig = 3)
print(fit@misc$omegahat, dig = 3)
print(fit@misc$Phimatrices, dig = 2)
-par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
+\dontrun{ par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(4, 1))
for(ii in 1:4) {
    plot(year, fit@misc$Z[,ii], main = paste("Z", ii, sep = ""),
- type = "l", xlab = "", ylab = "")
-    points(year, fit@misc$Z[,ii], pch = "*")
-}
-}
+ type = "l", xlab = "", ylab = "", las = 1, col = "blue")
+    points(year, fit@misc$Z[,ii], pch = "*", col = "blue")
+} }
}
\keyword{ts}
\keyword{regression}
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 9b5dcd1..9efd118 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -145,7 +145,7 @@ Regression and ordered categorical variables.
\bold{46}, 1--30.
- Yee, T. W. (2010)
+ Yee, T. W. (2012)
Two-parameter reduced-rank vector generalized linear models.
\emph{In preparation}.
@@ -219,7 +219,7 @@ Regression and ordered categorical variables.
\code{\link{negbinomial}}
\code{\link{zipoisson}}
and \code{\link{zinegbinomial}}.
- (see Yee (2010) and \pkg{COZIGAM}).
+ (see Yee (2012) and \pkg{COZIGAM}).
Methods functions include
\code{\link{Coef.rrvglm}},
\code{summary.rrvglm},
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 2ac9161..f2b28e8 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -7,24 +7,24 @@
Singh-Maddala distribution.
}
\usage{
-sinmad(link.a = "loge", link.scale = "loge", link.q = "loge",
- earg.a = list(), earg.scale = list(), earg.q = list(),
- init.a = NULL, init.scale = NULL, init.q = 1, zero = NULL)
+sinmad(lshape1.a = "loge", lscale = "loge", lshape3.q = "loge",
+ eshape1.a = list(), escale = list(), eshape3.q = list(),
+ ishape1.a = NULL, iscale = NULL, ishape3.q = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.a, link.scale, link.q}{
+ \item{lshape1.a, lscale, lshape3.q}{
Parameter link functions applied to the
(positive) parameters \code{a}, \code{scale}, and \code{q}.
See \code{\link{Links}} for more choices.
}
- \item{earg.a, earg.scale, earg.q}{
+ \item{eshape1.a, escale, eshape3.q}{
List. Extra argument for each of the links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{init.a, init.scale, init.q}{
+ \item{ishape1.a, iscale, ishape3.q}{
Optional initial values for \code{a}, \code{scale}, and \code{q}.
}
@@ -62,7 +62,7 @@ The cumulative distribution function is
The mean is
\deqn{E(Y) = b \, \Gamma(1 + 1/a) \, \Gamma(q - 1/a) / \Gamma(q)}{%
E(Y) = b gamma(1 + 1/a) gamma(q - 1/a) / gamma(q)}
-provided \eqn{-a < 1 < aq}.
+provided \eqn{-a < 1 < aq}; these are returned as the fitted values.
}
@@ -70,20 +70,25 @@ provided \eqn{-a < 1 < aq}.
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\note{
-If the self-starting initial values fail, try experimenting
-with the initial value arguments, especially those whose
-default value is not \code{NULL}.
+ If the self-starting initial values fail, try experimenting with
+ the initial value arguments, especially those whose default
+ value is not \code{NULL}. Also, the constraint \eqn{-a < 1 < aq}
+ may be violated as the iterations progress.
+
}
@@ -101,10 +106,10 @@ default value is not \code{NULL}.
}
\examples{
-sdata = data.frame(y = rsinmad(n=3000, 3, 5, 2))
-fit = vglm(y ~ 1, sinmad, sdata, trace=TRUE)
-fit = vglm(y ~ 1, sinmad, sdata, trace=TRUE, crit="c")
-coef(fit, mat=TRUE)
+sdata = data.frame(y = rsinmad(n = 1000, exp(1), exp(2), exp(0)))
+fit = vglm(y ~ 1, sinmad, sdata, trace = TRUE)
+fit = vglm(y ~ 1, sinmad, sdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/SinmadUC.Rd b/man/sinmadUC.Rd
similarity index 80%
rename from man/SinmadUC.Rd
rename to man/sinmadUC.Rd
index 5c53216..42c250d 100644
--- a/man/SinmadUC.Rd
+++ b/man/sinmadUC.Rd
@@ -12,17 +12,17 @@
}
\usage{
-dsinmad(x, a, scale = 1, q.arg, log = FALSE)
-psinmad(q, a, scale = 1, q.arg)
-qsinmad(p, a, scale = 1, q.arg)
-rsinmad(n, a, scale = 1, q.arg)
+dsinmad(x, shape1.a, scale = 1, shape3.q, log = FALSE)
+psinmad(q, shape1.a, scale = 1, shape3.q)
+qsinmad(p, shape1.a, scale = 1, shape3.q)
+rsinmad(n, shape1.a, scale = 1, shape3.q)
}
\arguments{
\item{x, q}{vector of quantiles.}
\item{p}{vector of probabilities.}
\item{n}{number of observations. If \code{length(n) > 1}, the length
is taken to be the number required.}
- \item{a, q.arg}{shape parameters.}
+ \item{shape1.a, shape3.q}{shape parameters.}
\item{scale}{scale parameter.}
\item{log}{
Logical.
@@ -39,6 +39,7 @@ rsinmad(n, a, scale = 1, q.arg)
}
\references{
+
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and
Actuarial Sciences},
@@ -50,19 +51,22 @@ Hoboken, NJ: Wiley-Interscience.
See \code{\link{sinmad}}, which is the \pkg{VGAM} family function
for estimating the parameters by maximum likelihood estimation.
+
}
\note{
The Singh-Maddala distribution is a special case of the 4-parameter
generalized beta II distribution.
+
}
\seealso{
\code{\link{sinmad}},
\code{\link{genbetaII}}.
+
}
\examples{
-y = rsinmad(n = 3000, 4, 6, 2)
-fit = vglm(y ~ 1, sinmad(init.a = 2.1), trace = TRUE, crit = "c")
+sdata = data.frame(y = rsinmad(n = 3000, 4, 6, 2))
+fit = vglm(y ~ 1, sinmad(ishape1.a = 2.1), sdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/toxop.Rd b/man/toxop.Rd
index 85d0a3e..df0ad5c 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -9,11 +9,15 @@
\format{
A data frame with 34 observations on the following 4 variables.
\describe{
- \item{\code{rainfall}}{a numeric vector; the amount of rainfall in each city.}
+ \item{\code{rainfall}}{
+ a numeric vector; the amount of rainfall
+ in each city.
+
+ }
\item{\code{ssize}}{a numeric vector; sample size.}
\item{\code{cityNo}}{a numeric vector; the city number.}
- \item{\code{positive}}{a numeric vector; the number of subjects testing positive
- for the disease. }
+ \item{\code{positive}}{a numeric vector; the number of subjects
+ testing positive for the disease. }
}
}
\details{
diff --git a/man/tparetoUC.Rd b/man/tparetoUC.Rd
index b96c16c..8a26c46 100644
--- a/man/tparetoUC.Rd
+++ b/man/tparetoUC.Rd
@@ -66,12 +66,13 @@ plot(xx, dtpareto(xx, low = lower, upp = upper, shape = kay),
main = "Truncated Pareto density split into 10 equal areas",
type = "l", ylim = 0:1, xlab = "x")
abline(h = 0, col = "blue", lty = 2)
-qq = qtpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper, shape = kay)
+qq = qtpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
+ shape = kay)
lines(qq, dtpareto(qq, low = lower, upp = upper, shape = kay),
col = "purple", lty = 3, type = "h")
-lines(xx, ptpareto(xx, low = lower, upp = upper, shape = kay), col = "orange")
-}
-pp = seq(0.1, 0.9,by = 0.1)
+lines(xx, ptpareto(xx, low = lower, upp = upper, shape = kay),
+ col = "orange") }
+pp = seq(0.1, 0.9, by = 0.1)
qq = qtpareto(pp, low = lower, upp = upper, shape = kay)
ptpareto(qq, low = lower, upp = upper, shape = kay)
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 6f1a952..0089d8e 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -11,13 +11,13 @@ It is only applicable for rank-1 models with argument
}
\usage{
-trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it=TRUE,
+trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
label.sites = FALSE, sitenames = rownames(object at y),
axes.equal = TRUE, cex = par()$cex,
col = 1:(nos * (nos - 1)/2), log = "",
- lty = rep(par()$lty, len = nos * (nos - 1)/2),
- lwd = rep(par()$lwd, len = nos * (nos - 1)/2),
- tcol = rep(par()$col, len = nos * (nos - 1)/2),
+ lty = rep(par()$lty, length.out = nos * (nos - 1)/2),
+ lwd = rep(par()$lwd, length.out = nos * (nos - 1)/2),
+ tcol = rep(par()$col, length.out = nos * (nos - 1)/2),
xlab = NULL, ylab = NULL,
main = "", type = "b", check.ok = TRUE, ...)
}
@@ -122,12 +122,14 @@ variables.
}
\references{
-Yee, T. W. (2011)
+
+Yee, T. W. (2012)
On constrained and unconstrained
quadratic ordination.
\emph{Manuscript in preparation}.
+
}
\author{ Thomas W. Yee }
@@ -137,9 +139,10 @@ quadratic ordination.
The use of \code{xlim} and \code{ylim} to control the axis limits
is also a good idea, so as to limit the extent of the curves at low
abundances or probabilities.
- Setting \code{label.sites=TRUE} is a good idea only if the number of
+ Setting \code{label.sites = TRUE} is a good idea only if the number of
sites is small, otherwise there is too much clutter.
+
}
\seealso{
@@ -147,6 +150,7 @@ quadratic ordination.
\code{\link[graphics]{par}},
\code{\link[graphics]{title}}.
+
}
\examples{\dontrun{ set.seed(111) # This leads to the global solution
diff --git a/man/ugss.Rd b/man/ugss.Rd
index 4377a06..ffa6746 100644
--- a/man/ugss.Rd
+++ b/man/ugss.Rd
@@ -31,8 +31,10 @@
a numeric vector}
\item{\code{tv}}{Average number of hours watching TV per week,
a numeric vector}
- \item{\code{movies}}{Number of movies seen at a cinema during the last 3 months,
- a numeric vector}
+ \item{\code{movies}}{
+ Number of movies seen at a cinema during the last 3 months,
+ a numeric vector
+ }
\item{\code{movies3m}}{Seen movies in last 3 months?
a factor, (Yes or No)}
\item{\code{sport}}{Favourite sport, a factor,
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index 938ff0b..4c51271 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -5,6 +5,33 @@
%
%
%
+%
+% 20120215
+%\alias{print,vglmff-method}
+\alias{show,vglmff-method}
+%
+%
+%
+% 20120112
+\alias{AIC,ANY-method}
+\alias{coef,ANY-method}
+\alias{logLik,ANY-method}
+\alias{plot,ANY-method}
+\alias{vcov,ANY-method}
+\alias{plot,cao,ANY-method}
+\alias{plot,qrrvglm,ANY-method}
+\alias{plot,rcam,ANY-method}
+\alias{plot,rcam0,ANY-method}
+\alias{plot,uqo,ANY-method}
+\alias{plot,vgam,ANY-method}
+\alias{plot,vglm,ANY-method}
+\alias{plot,vlm,ANY-method}
+\alias{plot,vsmooth.spline,ANY-method}
+%
+%
+%
+%
+%
\alias{AIC,vlm-method}
\alias{AIC,vglm-method}
\alias{AIC,vgam-method}
@@ -99,13 +126,15 @@
\alias{guplot,vlm-method}
%\alias{model.frame,ANY-method}
\alias{model.frame,vlm-method}
-\alias{plot,rcam0,ANY-method}
-\alias{plot,rcam,ANY-method}
-\alias{plot,cao,ANY-method}
-\alias{plot,vlm,ANY-method}
-\alias{plot,vglm,ANY-method}
-\alias{plot,vgam,ANY-method}
-\alias{plot,qrrvglm,ANY-method}
+%\alias{plot,rcam0,ANY-method}
+%\alias{plot,rcam,ANY-method}
+%\alias{plot,cao,ANY-method}
+%\alias{plot,vlm,ANY-method}
+%\alias{plot,vglm,ANY-method}
+%\alias{plot,vgam,ANY-method}
+%\alias{plot,qrrvglm,ANY-method}
+%\alias{plot,uqo,ANY-method}
+%\alias{plot,vsmooth.spline,ANY-method}
\alias{predictors,vglm-method}
\alias{rlplot,vglm-method}
\alias{terms,vlm-method}
@@ -154,8 +183,6 @@
\alias{persp,cao-method}
\alias{persp,qrrvglm-method}
\alias{persp,uqo-method}
-\alias{plot,uqo,ANY-method}
-\alias{plot,vsmooth.spline,ANY-method}
\alias{predict,cao-method}
\alias{predict,qrrvglm-method}
\alias{predict,vgam-method}
@@ -170,7 +197,16 @@
% Added 20090505:
%\alias{print,ANY-method}
%
-\alias{print,vglmff-method}
+%
+% Added 20111224:
+\alias{lrtest,ANY-method}
+\alias{lrtest,vglm-method}
+%\alias{waldtest,ANY-method}
+\alias{print,VGAManova-method}
+\alias{show,VGAManova-method}
+%
+%
+%
\alias{print,Coef.cao-method}
\alias{print,summary.cao-method}
\alias{print,qrrvglm-method}
@@ -205,7 +241,6 @@
\alias{resid,vgam-method}
\alias{resid,uqo-method}
\alias{resid,vsmooth.spline-method}
-\alias{show,vglmff-method}
\alias{show,Coef.cao-method}
\alias{show,summary.cao-method}
\alias{show,qrrvglm-method}
@@ -270,7 +305,8 @@
}
%\usage{
-% \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,Coef.rrvglm,Coef.qrrvglm}(object, ...)
+% \S4method{ccoef}{cao,Coef.cao,rrvglm,qrrvglm,
+% Coef.rrvglm,Coef.qrrvglm}(object, ...)
%}
\section{Methods}{
diff --git a/man/uqo.Rd b/man/uqo.Rd
index c27a10c..44d4bd9 100644
--- a/man/uqo.Rd
+++ b/man/uqo.Rd
@@ -54,7 +54,7 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
The ``factory-fresh'' default is \code{na.omit}.
}
\item{etastart}{ starting values for the linear predictors.
- It is a \eqn{M}-column matrix. If \eqn{M=1} then it may be a vector.
+ It is a \eqn{M}-column matrix. If \eqn{M = 1} then it may be a vector.
}
\item{mustart}{ starting values for the
fitted values. It can be a vector or a matrix.
@@ -105,14 +105,14 @@ uqo(formula, family, data = list(), weights = NULL, subset = NULL,
are largely free parameters and are not constrained to be linear
combinations of the environmental variables. This poses a
difficult optimization problem. The current algorithm is very simple
- and will often fail (even for \code{Rank=1}) but hopefully this will
+ and will often fail (even for \code{Rank = 1}) but hopefully this will
be improved in the future.
The central formula is given by
\deqn{\eta = B_1^T x_1 + A \nu +
- \sum_{m=1}^M (\nu^T D_m \nu) e_m}{%
+ \sum_{m = 1}^M (\nu^T D_m \nu) e_m}{%
eta = B_1^T x_1 + A nu +
- sum_{m=1}^M (nu^T D_m nu) e_m}
+ sum_{m = 1}^M (nu^T D_m nu) e_m}
where \eqn{x_1}{x_1} is a vector (usually just a 1 for an intercept),
\eqn{\nu}{nu} is a \eqn{R}-vector of latent variables, \eqn{e_m} is
a vector of 0s but with a 1 in the \eqn{m}th position.
@@ -141,21 +141,24 @@ estimated for each species, hence will give an error message here.
}
\references{
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
-Yee, T. W. (2005)
-On constrained and unconstrained
-quadratic ordination.
-\emph{Manuscript in preparation}.
+
+%Yee, T. W. (2005)
+%On constrained and unconstrained quadratic ordination.
+%\emph{Manuscript in preparation}.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{Thomas W. Yee}
@@ -165,6 +168,7 @@ Constrained additive ordination.
When \eqn{R>1}, they are uncorrelated and should be unique up
to a rotation.
+
The argument \code{Bestof} in \code{\link{uqo.control}} controls
the number of models fitted (each uses different starting values) to
the data. This argument is important because convergence may be to a
@@ -173,32 +177,38 @@ starting values increases the chances of finding the global solution.
Local solutions arise because the optimization problem is highly
nonlinear.
+
In the example below, a CQO model is fitted and used for providing
initial values for a UQO model.
+
}
\section{Warning }{
Local solutions are not uncommon when fitting UQO models. To increase
the chances of obtaining the global solution, set
- \code{ITolerances=TRUE} or \code{EqualTolerances=TRUE} and increase
+ \code{ITolerances = TRUE} or \code{EqualTolerances = TRUE} and increase
the value of the argument \code{Bestof} in \code{\link{uqo.control}}.
For reproducibility of the results, it pays to set a different random
number seed before calling \code{uqo} (the function
\code{\link[base:Random]{set.seed}} does this).
+
The function \code{uqo} is very sensitive to initial values, and there
is a lot of room for improvement here.
+
UQO is computationally expensive. It pays to keep the rank to no more
than 2, and 1 is much preferred over 2.
The data needs to conform closely to the statistical model.
+
Currently there is a bug with the argument \code{Crow1positive}
in \code{\link{uqo.control}}. This argument might be interpreted
as controlling the sign of the first site score, but currently
this is not done.
+
}
\seealso{
@@ -216,9 +226,10 @@ this is not done.
\code{vcov.uqo},
\code{\link[base:Random]{set.seed}},
\code{\link{hspider}}.
+
+
}
-\examples{
-\dontrun{
+\examples{ \dontrun{
set.seed(123) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
@@ -226,7 +237,7 @@ p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
ITolerances = TRUE, fam = poissonff, data = hspider,
- Crow1positive=TRUE, Bestof=3, trace=FALSE)
+ Crow1positive = TRUE, Bestof = 3, trace = FALSE)
if (deviance(p1) > 1589.0) stop("suboptimal fit obtained")
set.seed(111)
@@ -240,29 +251,28 @@ if (deviance(up1) > 1310.0) stop("suboptimal fit obtained")
nos = ncol(up1 at y) # Number of species
clr = (1:(nos+1))[-7] # to omit yellow
-lvplot(up1, las=1, y=TRUE, pch=1:nos, scol=clr, lcol=clr,
- pcol=clr, llty=1:nos, llwd=2)
-legend(x=2, y=135, colnames(up1 at y), col=clr, lty=1:nos,
- lwd=2, merge=FALSE, ncol=1, x.inter=4.0, bty="l", cex=0.9)
+lvplot(up1, las = 1, y = TRUE, pch = 1:nos, scol = clr, lcol = clr,
+ pcol = clr, llty = 1:nos, llwd=2)
+legend(x=2, y = 135, colnames(up1 at y), col = clr, lty = 1:nos,
+ lwd=2, merge = FALSE, ncol = 1, x.inter=4.0, bty = "l", cex = 0.9)
# Compare the site scores between the two models
-plot(lv(p1), lv(up1), xlim=c(-3,4), ylim=c(-3,4), las=1)
-abline(a=0, b=-1, lty=2, col="blue", xpd=FALSE)
-cor(lv(p1, ITol=TRUE), lv(up1))
+plot(lv(p1), lv(up1), xlim = c(-3,4), ylim = c(-3,4), las = 1)
+abline(a = 0, b=-1, lty=2, col = "blue", xpd = FALSE)
+cor(lv(p1, ITol = TRUE), lv(up1))
# Another comparison between the constrained and unconstrained models
# The signs are not right so they are similar when reflected about 0
-par(mfrow=c(2,1))
-persp(up1, main="Red/Blue are the constrained/unconstrained models",
- label=TRUE, col="blue", las=1)
-persp(p1, add=FALSE, col="red")
-pchisq(deviance(p1) - deviance(up1), df=52-30, lower.tail=FALSE)
-}
-}
+par(mfrow = c(2,1))
+persp(up1, main = "Red/Blue are the constrained/unconstrained models",
+ label = TRUE, col = "blue", las = 1)
+persp(p1, add = FALSE, col = "red")
+pchisq(deviance(p1) - deviance(up1), df=52-30, lower.tail = FALSE)
+}}
\keyword{models}
\keyword{regression}
% 6/10/06; when the bug is fixed:
-%persp(p1, add=TRUE, col="red")
+%persp(p1, add = TRUE, col = "red")
diff --git a/man/uqo.control.Rd b/man/uqo.control.Rd
index 2e6ac43..5b342cd 100644
--- a/man/uqo.control.Rd
+++ b/man/uqo.control.Rd
@@ -232,23 +232,29 @@ uqo.control(Rank=1, Bestof = if (length(lvstart) &&
}
\references{
-Yee, T. W. (2005)
-On constrained and unconstrained
-quadratic ordination.
-\emph{Manuscript in preparation}.
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
+%Yee, T. W. (2012)
+%On constrained and unconstrained quadratic ordination.
+%\emph{Manuscript in preparation}.
+
+
}
\author{T. W. Yee}
\note{
This is a difficult optimization problem, and the current
algorithm needs to be improved.
+
+
}
\seealso{
\code{\link{uqo}}.
+
+
}
\section{Warning }{
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
index 55af6aa..362dca7 100644
--- a/man/vgam-class.Rd
+++ b/man/vgam-class.Rd
@@ -213,12 +213,15 @@ a more detailed summary of the object. }
}
\references{
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
\url{http://www.stat.auckland.ac.nz/~yee}
+
}
\author{ Thomas W. Yee }
\note{
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 054f8ee..865d518 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -110,13 +110,15 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{constraints}{
- an optional list of constraint matrices. The components of the list
- must be named with the term it corresponds to (and it must match in
- character format exactly). Each constraint matrix must have \eqn{M} rows, and
- be of full-column rank. By default, constraint matrices are the \eqn{M}
- by \eqn{M} identity matrix unless arguments in the family function
- itself override these values. If \code{constraints} is used it must
- contain \emph{all} the terms; an incomplete list is not accepted.
+ an optional list of constraint matrices. The components
+ of the list must be named with the term it corresponds
+ to (and it must match in character format exactly).
+ Each constraint matrix must have \eqn{M} rows, and be
+ of full-column rank. By default, constraint matrices are
+ the \eqn{M} by \eqn{M} identity matrix unless arguments
+ in the family function itself override these values.
+ If \code{constraints} is used it must contain \emph{all}
+ the terms; an incomplete list is not accepted.
}
\item{extra}{
@@ -155,27 +157,28 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
implemented and this is called a \emph{vector (cubic smoothing spline)
smoother}.
Here, \eqn{j=1,\ldots,M} where \eqn{M} is finite.
- If all the functions are constrained to be linear then the resulting
- model is a vector generalized linear model (VGLM).
- VGLMs are best fitted with \code{\link{vglm}}.
+ If all the functions are constrained to be linear then
+ the resulting model is a vector generalized linear model
+ (VGLM). VGLMs are best fitted with \code{\link{vglm}}.
Vector (cubic smoothing spline) smoothers are represented
- by \code{s()} (see \code{\link[VGAM]{s}}).
- Local regression via \code{lo()} is \emph{not}
- supported. The results of \code{vgam} will differ from the S-PLUS and \R
- \code{gam} function (in the \pkg{gam} \R package) because \code{vgam}
- uses a different knot selection algorithm. In general, fewer knots
- are chosen because the computation becomes expensive when the number
- of additive predictors \eqn{M} is large.
+ by \code{s()} (see \code{\link[VGAM]{s}}). Local
+ regression via \code{lo()} is \emph{not} supported. The
+ results of \code{vgam} will differ from the \code{gam()}
+ (in the \pkg{gam} package) because \code{vgam()} uses a different
+ knot selection algorithm. In general, fewer knots are
+ chosen because the computation becomes expensive when
+ the number of additive predictors \eqn{M} is large.
The underlying algorithm of VGAMs is iteratively
reweighted least squares (IRLS) and modified vector backfitting
using vector splines. B-splines are used as the basis functions
for the vector (smoothing) splines.
- \code{vgam.fit} is the function that actually does the work.
- The smoothing code is based on F. O'Sullivan's BART code.
+ \code{vgam.fit()} is the function that actually does the
+ work. The smoothing code is based on F. O'Sullivan's
+ BART code.
% If more than one of \code{etastart}, \code{start} and \code{mustart}
@@ -183,11 +186,11 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
A closely related methodology based on VGAMs called
- \emph{constrained additive ordination} (CAO)
- first forms a linear combination of the explanatory variables
- (called \emph{latent variables}) and then fits a GAM to these.
- This is implemented in the function \code{\link{cao}} for a very
- limited choice of family functions.
+ \emph{constrained additive ordination} (CAO) first forms
+ a linear combination of the explanatory variables (called
+ \emph{latent variables}) and then fits a GAM to these.
+ This is implemented in the function \code{\link{cao}}
+ for a very limited choice of family functions.
}
@@ -230,22 +233,23 @@ The \code{VGAM} Package.
\code{coefstart} and \code{mustart}.
- Some \pkg{VGAM} family functions end in \code{"ff"} to avoid
- interference with other functions, e.g., \code{\link{binomialff}},
- \code{\link{poissonff}}, \code{\link{gaussianff}},
- \code{gammaff}. This is because \pkg{VGAM} family
- functions are incompatible with \code{\link[stats]{glm}}
- (and also \code{\link[gam]{gam}} in the \pkg{gam} library and
- \code{\link[mgcv]{gam}} in the \pkg{mgcv} library).
+ Some \pkg{VGAM} family functions end in \code{"ff"}
+ to avoid interference with other functions, e.g.,
+ \code{\link{binomialff}}, \code{\link{poissonff}},
+ \code{\link{gaussianff}}, \code{gammaff}. This is
+ because \pkg{VGAM} family functions are incompatible with
+ \code{\link[stats]{glm}} (and also \code{\link[gam]{gam}}
+ in the \pkg{gam} library and \code{\link[mgcv]{gam}}
+ in the \pkg{mgcv} library).
- The smart prediction (\code{\link{smartpred}}) library is packed with
- the \pkg{VGAM} library.
+ The smart prediction (\code{\link{smartpred}}) library
+ is packed with the \pkg{VGAM} library.
- The theory behind the scaling parameter is currently being made more
- rigorous, but it it should give the same value as the scale parameter
- for GLMs.
+ The theory behind the scaling parameter is currently being
+ made more rigorous, but it should give the same value
+ as the scale parameter for GLMs.
}
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 8a6900a..3833880 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -5,6 +5,7 @@
\description{
Algorithmic constants and parameters for running \code{\link{vgam}}
are set using this function.
+
}
\usage{
vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
@@ -98,7 +99,7 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
\item{se.fit}{
logical indicating whether approximate
pointwise standard errors are to be saved on the object.
- If \code{TRUE}, then these can be plotted with \code{plot(..., se=TRUE)}.
+ If \code{TRUE}, then these can be plotted with \code{plot(..., se = TRUE)}.
}
\item{trace}{
@@ -126,24 +127,27 @@ vgam.control(all.knots = FALSE, bf.epsilon = 1e-07, bf.maxit = 30,
\details{
- Most of the control parameters are used within \code{vgam.fit} and
- you will have to look at that to understand the full details. Many of
- the control parameters are used in a similar manner by \code{vglm.fit}
- (\code{\link{vglm}}) because the algorithm (IRLS) is very similar.
+ Most of the control parameters are used within
+ \code{vgam.fit} and you will have to look at that
+ to understand the full details. Many of the control
+ parameters are used in a similar manner by \code{vglm.fit}
+ (\code{\link{vglm}}) because the algorithm (IRLS) is
+ very similar.
- Setting \code{save.weight=FALSE} is useful for some models because the
- \code{weights} slot of the object is often the largest and so less
- memory is used to store the object. However, for some \pkg{VGAM}
- family function, it is necessary to set \code{save.weight=TRUE} because
+ Setting \code{save.weight=FALSE} is useful for some
+ models because the \code{weights} slot of the object is
+ often the largest and so less memory is used to store the
+ object. However, for some \pkg{VGAM} family functions,
+ it is necessary to set \code{save.weight=TRUE} because
the \code{weights} slot cannot be reconstructed later.
}
\value{
- A list with components matching the input names. A little error
- checking is done, but not much.
- The list is assigned to the \code{control} slot of \code{\link{vgam}} objects.
+ A list with components matching the input names. A little
+ error checking is done, but not much. The list is assigned
+ to the \code{control} slot of \code{\link{vgam}} objects.
}
@@ -161,9 +165,10 @@ Vector generalized additive models.
\author{ Thomas W. Yee}
\note{
- \code{\link{vgam}} does not implement half-stepsizing, therefore
- parametric models should be fitted with \code{\link{vglm}}. Also,
- \code{\link{vgam}} is slower than \code{\link{vglm}} too.
+ \code{\link{vgam}} does not implement half-stepsizing,
+ therefore parametric models should be fitted with
+ \code{\link{vglm}}. Also, \code{\link{vgam}} is slower
+ than \code{\link{vglm}} too.
}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index 6b974b0..998e9de 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -21,9 +21,9 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{formula}{
a symbolic description of the model to be fit.
- The RHS of the formula is applied to each linear predictor. Different
- variables in each linear predictor can be chosen by specifying
- constraint matrices.
+ The RHS of the formula is applied to each linear
+ predictor. Different variables in each linear predictor
+ can be chosen by specifying constraint matrices.
}
\item{family}{
@@ -37,26 +37,26 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{data}{
an optional data frame containing the variables in the model.
By default the variables are taken from
- \code{environment(formula)}, typically the environment from which
- \code{vglm} is called.
+ \code{environment(formula)}, typically the environment
+ from which \code{vglm} is called.
}
\item{weights}{
- an optional vector or matrix of (prior) weights
- to be used in the fitting process.
- If \code{weights} is a matrix, then it must be in
- \emph{matrix-band} form, whereby the first \eqn{M}
- columns of the matrix are the
- diagonals, followed by the upper-diagonal band, followed by the
- band above that, etc. In this case, there can be up to \eqn{M(M+1)}
- columns, with the last column corresponding to the (1,\eqn{M}) elements
- of the weight matrices.
+ an optional vector or matrix of (prior) weights to be used
+ in the fitting process. If \code{weights} is a matrix,
+ then it must be in \emph{matrix-band} form, whereby the
+ first \eqn{M} columns of the matrix are the diagonals,
+ followed by the upper-diagonal band, followed by the
+ band above that, etc. In this case, there can be up to
+ \eqn{M(M+1)} columns, with the last column corresponding
+ to the (1,\eqn{M}) elements of the weight matrices.
}
\item{subset}{
an optional logical vector specifying a subset of
observations to
be used in the fitting process.
+
}
\item{na.action}{
a function which indicates what should happen when
@@ -96,14 +96,14 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{offset}{
a vector or \eqn{M}-column matrix of offset values.
- These are \emph{a priori} known and are added to the linear predictors
- during fitting.
+ These are \emph{a priori} known and are added to the
+ linear predictors during fitting.
}
\item{method}{
the method to be used in fitting the model. The default (and
- presently only) method \code{vglm.fit} uses iteratively reweighted
- least squares (IRLS).
+ presently only) method \code{vglm.fit()} uses iteratively
+ reweighted least squares (IRLS).
}
\item{model}{
@@ -134,20 +134,21 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
The former is a subset of the latter.
The former has a matrix for each term of the LM matrix.
The latter has a matrix for each column of the VLM matrix.
- After fitting, the \code{\link{constraints}} extractor function may be applied;
- it returns
- the \code{"vlm"}-type list of constraint matrices by default.
- If \code{"lm"}-type are returned by \code{\link{constraints}} then
- these can be fed into this argument and it should give the
- same model as before.
+ After fitting, the \code{\link{constraints}}
+ extractor function may be applied; it returns
+ the \code{"vlm"}-type list of constraint matrices
+ by default. If \code{"lm"}-type are returned by
+ \code{\link{constraints}} then these can be fed into this
+ argument and it should give the same model as before.
- Each constraint matrix must have \eqn{M} rows, and be of full-column rank.
- By default, constraint matrices are the \eqn{M} by \eqn{M} identity
- matrix unless arguments in the family function itself override
- these values, e.g., \code{parallel} (see \code{\link{CommonVGAMffArguments}}).
- If \code{constraints} is used it must contain \emph{all} the
- terms; an incomplete list is not accepted.
+ Each constraint matrix must have \eqn{M} rows, and be of
+ full-column rank. By default, constraint matrices are
+ the \eqn{M} by \eqn{M} identity matrix unless arguments
+ in the family function itself override these values, e.g.,
+ \code{parallel} (see \code{\link{CommonVGAMffArguments}}).
+ If \code{constraints} is used it must contain \emph{all}
+ the terms; an incomplete list is not accepted.
}
@@ -167,9 +168,9 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{qr.arg}{
- logical value indicating whether
- the slot \code{qr}, which returns the QR decomposition of the
- VLM model matrix, is returned on the object.
+ logical value indicating whether the slot \code{qr}, which
+ returns the QR decomposition of the VLM model matrix,
+ is returned on the object.
}
\item{smart}{
@@ -179,6 +180,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
}
\item{\dots}{
further arguments passed into \code{\link{vglm.control}}.
+
}
}
@@ -277,15 +279,18 @@ Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
Yee, T. W. (2008)
The \code{VGAM} Package.
\emph{R News}, \bold{8}, 28--39.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
@@ -356,6 +361,7 @@ The \code{VGAM} Package.
\code{\link{predictvglm}},
\code{summary.vglm},
\code{AIC.vglm},
+ \code{\link{lrtest_vglm}},
etc.
}
@@ -365,7 +371,7 @@ The \code{VGAM} Package.
print(d.AD <- data.frame(treatment = gl(3, 3),
outcome = gl(3, 1, 9),
counts = c(18,17,15,20,10,20,25,13,12)))
-vglm.D93 = vglm(counts ~ outcome + treatment, family=poissonff,
+vglm.D93 = vglm(counts ~ outcome + treatment, family = poissonff,
data = d.AD, trace = TRUE)
summary(vglm.D93)
@@ -386,7 +392,7 @@ model.matrix(fit3) # Larger VGLM (or VLM) model matrix
# Example 4. Bivariate logistic model
fit4 = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
coef(fit4, matrix = TRUE)
-fit4 at y # Response are proportions
+depvar(fit4) # Response are proportions
weights(fit4, type = "prior")
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index 97b14f6..a94da3f 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -17,37 +17,38 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
%- maybe also `usage' for other objects documented here.
\arguments{
\item{checkwz}{
- logical indicating whether the diagonal elements of
- the working weight matrices should be checked whether they are
- sufficiently positive, i.e., greater than \code{wzepsilon}. If not,
- any values less than \code{wzepsilon} are replaced with this value.
+ logical indicating whether the diagonal elements
+ of the working weight matrices should be checked
+ whether they are sufficiently positive, i.e., greater
+ than \code{wzepsilon}. If not, any values less than
+ \code{wzepsilon} are replaced with this value.
}
\item{criterion}{
- character variable describing what criterion is to
- be used to test for convergence.
- The possibilities are listed in \code{.min.criterion.VGAM}, but
- most family functions only implement a few of these.
+ character variable describing what criterion is to be
+ used to test for convergence. The possibilities are
+ listed in \code{.min.criterion.VGAM}, but most family
+ functions only implement a few of these.
}
\item{epsilon}{
- positive convergence tolerance epsilon. Roughly
- speaking, the Newton-Raphson/Fisher-scoring iterations
- are assumed to have
- converged when two successive \code{criterion} values are within
- \code{epsilon} of each other.
+ positive convergence tolerance epsilon. Roughly speaking,
+ the Newton-Raphson/Fisher-scoring iterations are assumed
+ to have converged when two successive \code{criterion}
+ values are within \code{epsilon} of each other.
}
\item{half.stepsizing}{
- logical indicating if half-stepsizing is
- allowed. For example, in maximizing a log-likelihood, if the
- next iteration has a log-likelihood that is less than the current
- value of the log-likelihood, then a half step will be taken.
- If the log-likelihood is still less than at the current position,
- a quarter-step will be taken etc. Eventually a step will be taken
- so that an improvement is made to the convergence criterion.
- \code{half.stepsizing} is ignored if
- \code{criterion=="coefficients"}.
+ logical indicating if half-stepsizing is allowed. For
+ example, in maximizing a log-likelihood, if the next
+ iteration has a log-likelihood that is less than
+ the current value of the log-likelihood, then a half
+ step will be taken. If the log-likelihood is still
+ less than at the current position, a quarter-step
+ will be taken etc. Eventually a step will be taken
+ so that an improvement is made to the convergence
+ criterion. \code{half.stepsizing} is ignored if
+ \code{criterion == "coefficients"}.
}
\item{maxit}{
@@ -56,38 +57,38 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
}
\item{stepsize}{
usual step size to be taken between each
- Newton-Raphson/Fisher-scoring iteration. It should be a value
- between 0 and 1, where
- a value of unity corresponds to an ordinary step.
- A value of 0.5 means half-steps are taken.
- Setting a value near zero will cause convergence to be generally slow
- but may help increase the chances of successful convergence for some
- family functions.
+ Newton-Raphson/Fisher-scoring iteration. It should be a
+ value between 0 and 1, where a value of unity corresponds
+ to an ordinary step. A value of 0.5 means half-steps are
+ taken. Setting a value near zero will cause convergence
+ to be generally slow but may help increase the chances
+ of successful convergence for some family functions.
}
\item{save.weight}{
- logical indicating whether the \code{weights} slot
- of a \code{"vglm"} object will be saved on the object. If not, it will
- be reconstructed when needed, e.g., \code{summary}.
- Some family functions have \code{save.weight=TRUE} and others have
- \code{save.weight=FALSE} in their control functions.
+ logical indicating whether the \code{weights} slot of a
+ \code{"vglm"} object will be saved on the object. If not,
+ it will be reconstructed when needed, e.g., \code{summary}.
+ Some family functions have \code{save.weight = TRUE} and
+ others have \code{save.weight = FALSE} in their control
+ functions.
}
\item{trace}{
- logical indicating if output should be produced for each iteration.
- Setting \code{trace=TRUE} is recommended in general because
- \pkg{VGAM} fits a very broad variety of models and distributions, and
- for some of them, convergence is intrinsically more difficult.
- Monitoring convergence can help check that the solution is reasonable
- or that a problem has occurred.
- It may suggest better initial values are needed,
- the making of invalid assumptions, or that
- the model is inappropriate for the data, etc.
+ logical indicating if output should be produced for each
+ iteration. Setting \code{trace = TRUE} is recommended in
+ general because \pkg{VGAM} fits a very broad variety of
+ models and distributions, and for some of them, convergence
+ is intrinsically more difficult. Monitoring convergence
+ can help check that the solution is reasonable or that
+ a problem has occurred. It may suggest better initial
+ values are needed, the making of invalid assumptions,
+ or that the model is inappropriate for the data, etc.
}
\item{wzepsilon}{
- small positive number used to test whether the diagonals of the working
- weight matrices are sufficiently positive.
+ small positive number used to test whether the diagonals
+ of the working weight matrices are sufficiently positive.
}
\item{xij}{
@@ -131,38 +132,43 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
}
}
\details{
- Most of the control parameters are used within \code{vglm.fit} and
- you will have to look at that to understand the full details.
+ Most of the control parameters are used within
+ \code{vglm.fit} and you will have to look at that to
+ understand the full details.
- Setting \code{save.weight=FALSE} is useful for some models because
+
+ Setting \code{save.weight = FALSE} is useful for some models because
the \code{weights} slot of the object is the largest and so less
memory is used to store the object. However, for some \pkg{VGAM}
- family function, it is necessary to set \code{save.weight=TRUE}
+ family function, it is necessary to set \code{save.weight = TRUE}
because the \code{weights} slot cannot be reconstructed later.
+
}
\value{
A list with components matching the input names. A little error
checking is done, but not much.
- The list is assigned to the \code{control} slot of \code{vglm} objects.
+ The list is assigned to the \code{control} slot of
+ \code{vglm} objects.
+
}
\references{
-Yee, T. W. and Hastie, T. J. (2003)
-Reduced-rank vector generalized linear models.
-\emph{Statistical Modelling},
-\bold{3}, 15--41.
+ Yee, T. W. and Hastie, T. J. (2003)
+ Reduced-rank vector generalized linear models.
+ \emph{Statistical Modelling},
+ \bold{3}, 15--41.
}
\author{ Thomas W. Yee}
\note{
Reiterating from above,
- setting \code{trace=TRUE} is recommended in general.
+ setting \code{trace = TRUE} is recommended in general.
-In Example 2 below there are two covariates that have linear/additive
-predictor specific values.
-These are handled using the \code{xij} argument.
+ In Example 2 below there are two covariates that have linear/additive
+ predictor specific values.
+ These are handled using the \code{xij} argument.
}
@@ -171,8 +177,8 @@ These are handled using the \code{xij} argument.
\seealso{
\code{\link{vglm}},
\code{\link{fill}}.
- The author's homepage has further documentation about the
- \code{xij} argument.
+ The author's homepage has further documentation about
+ the \code{xij} argument.
}
@@ -209,8 +215,6 @@ plotvgam(fit2, xlab = "z1") # Correct
}
-
-
# Example 3. The use of the xij argument (complex case).
set.seed(123)
coalminers = transform(coalminers,
@@ -226,12 +230,12 @@ NS = function(x, ..., df = 3) ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
BS = function(x, ..., df = 3) head(bs(c(x,...), df = df), length(x), drop = FALSE)
NS = function(x, ..., df = 3) head(ns(c(x,...), df = df), length(x), drop = FALSE)
-fit3 = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1,dum2),
- fam = binom2.or(exchang = TRUE, zero = 3),
- xij = list(NS(dum1,dum2) ~ NS(dum1,dum2) +
- NS(dum2,dum1) +
- fill(NS(dum1))),
- form2 = ~ NS(dum1,dum2) + NS(dum2,dum1) + fill(NS(dum1)) +
+fit3 = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1, dum2),
+ fam = binom2.or(exchangeable = TRUE, zero = 3),
+ xij = list(NS(dum1, dum2) ~ NS(dum1, dum2) +
+ NS(dum2, dum1) +
+ fill(NS( dum1))),
+ form2 = ~ NS(dum1, dum2) + NS(dum2, dum1) + fill(NS(dum1)) +
dum1 + dum2 + dum3 + Age + age + dumm,
data = coalminers, trace = TRUE)
head(model.matrix(fit3, type = "lm")) # LM model matrix
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index a271ede..ad7ffad 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -8,8 +8,8 @@
}
\usage{
vonmises(llocation = "elogit", lscale = "loge",
- elocation = if(llocation == "elogit") list(min = 0, max = 2*pi)
- else list(), escale = list(), ilocation = NULL,
+ elocation = if (llocation == "elogit") list(min = 0, max = 2 * pi) else
+ list(), escale = list(), ilocation = NULL,
iscale = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -97,6 +97,7 @@ Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\note{
@@ -105,6 +106,7 @@ New York: Wiley-Interscience, Third edition.
The linear/additive predictors are left alone.
Fisher scoring is used.
+
}
\section{Warning }{
Numerically, the von Mises can be difficult to fit because of a
@@ -112,6 +114,7 @@ New York: Wiley-Interscience, Third edition.
The user is therefore encouraged to try different starting values,
i.e., make use of \code{ilocation} and \code{iscale}.
+
}
\seealso{
@@ -128,8 +131,8 @@ vdata = transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!!
fit = vglm(y ~ x2, vonmises(zero = 2), vdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
-with(vdata, range(y)) # original data
-range(fit at y) # processed data is in [0,2*pi)
+with(vdata, range(y)) # Original data
+range(depvar(fit)) # Processed data is in [0,2*pi)
}
\keyword{models}
\keyword{regression}
diff --git a/man/weibull.Rd b/man/weibull.Rd
index 734f7e9..c8da7af 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -99,7 +99,7 @@ weibull(lshape = "loge", lscale = "loge",
\references{
Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
-Hoboken, NJ: Wiley-Interscience.
+Hoboken, NJ, USA: Wiley-Interscience.
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index 13189c9..2506732 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -120,7 +120,7 @@ weightsvglm(object, type = c("prior", "working"),
pneumo = transform(pneumo, let = log(exposure.time))
(fit = vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel = TRUE, reverse = TRUE), pneumo))
-fit at y # These are sample proportions
+depvar(fit) # These are sample proportions
weights(fit, type = "prior", matrix = FALSE) # Number of observations
# Look at the working residuals
diff --git a/man/xs.nz.Rd b/man/xs.nz.Rd
new file mode 100644
index 0000000..01bcab8
--- /dev/null
+++ b/man/xs.nz.Rd
@@ -0,0 +1,420 @@
+\name{xs.nz}
+\alias{xs.nz}
+\docType{data}
+\title{
+ Cross-sectional Data from the New Zealand Population
+
+}
+\description{
+ A cross-sectional data set of a workforce company, plus
+ another health survey, in New Zealand during the 1990s.
+
+%% ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(xs.nz)}
+\format{
+ A data frame with 10529 observations on the following 58 variables.
+ For binary variables, a \code{"1"} or \code{TRUE} means \code{yes},
+ and \code{"0"} or \code{FALSE} means \code{no}.
+ Also, \code{"D"} means don't know,
+ and \code{"-"} means not applicable.
+ The pregnancy questions were administered to women only.
+
+
+ \describe{
+ \item{\code{regnum}}{a numeric vector,
+ a unique registration number.
+ This differs from their original registration number,
+ and the rows are sorted by their new registration number.
+
+ }
+ \item{\code{Study1}}{a logical vector, Study 1 (workforce) or Study 2?
+
+ }
+ \item{\code{age}}{a numeric vector, age in years.
+
+ }
+ \item{\code{sex}}{a factor with levels \code{F} and \code{M}.
+
+ }
+ \item{\code{pulse}}{a numeric vector, beats per minute.
+
+ }
+ \item{\code{sbp}}{a numeric vector, systolic blood pressure (mm Hg).
+
+ }
+ \item{\code{dbp}}{a numeric vector, diastolic blood pressure
+ (mm Hg).
+
+ }
+ \item{\code{cholest}}{a numeric vector, cholesterol (mmol/L).
+
+ }
+ \item{\code{height}}{a numeric vector, in m.
+
+ }
+ \item{\code{weight}}{a numeric vector, in kg.
+
+ }
+ \item{\code{famheart}}{a factor with levels \code{0}, \code{1},
+ \code{D}.
+ Has a family history of heart disease (heart attack, angina, or
+ had a heart bypass operation) within the immediate
+ family (brother, sister, father or mother, blood relatives only)?
+
+
+ }
+ \item{\code{famage}}{a factor, following from \code{famheart},
+ if yes, how old was the family member when it happened (if
+ more than one family member, give the age of the
+ youngest person)?
+
+
+ }
+ \item{\code{famcan}}{a factor with levels \code{0}, \code{1},
+ \code{D}.
+ Has a family history of cancer within the immediate
+ family (blood relatives only)?
+
+
+ }
+ \item{\code{heart}}{a factor, have you ever been told by
+ a doctor that you have had a heart attack ("coronary")?
+
+ }
+ \item{\code{stroke}}{a numeric vector, have you ever been told by
+ a doctor that you have had a stroke?
+
+ }
+ \item{\code{diabetes}}{a numeric vector, have you ever been told by
+ a doctor that you have had diabetes?
+
+
+ }
+ \item{\code{hyper}}{a numeric vector, have you ever been told by
+ a doctor that you have had high blood pressure (hypertension)?
+
+ }
+ \item{\code{hichol}}{a numeric vector, have you ever been told by
+ a doctor that you have had high cholesterol?
+
+
+ }
+ \item{\code{asthma}}{a numeric vector, have you ever been told by
+ a doctor that you have had asthma?
+
+
+ }
+ \item{\code{cancer}}{a numeric vector, have you ever been told by
+ a doctor that you have had cancer?
+
+ }
+ \item{\code{acne}}{a numeric vector, have you ever
+ received treatment from a doctor for acne?
+
+
+ }
+ \item{\code{sunburn}}{a numeric vector, have you ever
+ received treatment from a doctor for sunburn?
+
+ }
+ \item{\code{smokeever}}{a numeric vector, have you ever
+ smoked tailor-made or roll-your-own cigarettes once a
+ week or more?
+
+
+ }
+ \item{\code{smokenow}}{a numeric vector,
+ do you smoke tailor-made or roll-your-own cigarettes now?
+
+ }
+ \item{\code{smokeagequit}}{a factor,
+ if no to \code{smokenow}, how old were you when
+ you stopped smoking?
+
+
+ }
+ \item{\code{smokehowmany}}{a numeric vector,
+ if yes to \code{smokeever}, for how many years altogether
+ have you smoked tailor-made or roll-your-own cigarettes?
+
+
+ }
+ \item{\code{alcmonth}}{a numeric vector,
+ do you drink alcohol once a month or more?
+
+ }
+ \item{\code{drinkfreqweek}}{a numeric vector,
+ if yes to \code{alcmonth}, about how often do you
+ drink alcohol (days per week)?
+ Note: 0.25 is once a month,
+ 0.5 is once every two weeks,
+ 1 is once a week,
+ 2.5 is 2-3 days a week,
+ 4.5 is 4-5 days a week,
+ 6.5 is 6-7 days a week.
+
+ Further note:
+ 1 can, small bottle or handle of beer or home brew = 1 drink,
+ 1 quart bottle of beer = 2 drinks,
+ 1 jug of beer = 3 drinks,
+ 1 flagon/peter of beer = 6 drinks,
+ 1 glass of wine, sherry = 1 drink,
+ 1 bottle of wine = 6 drinks,
+ 1 double nip of spirits = 1 drink.
+
+
+ }
+ \item{\code{drinkweek}}{a numeric vector,
+ how many drinks per week, on average.
+ This is the average daily amount of drinks multiplied
+ by the frequency of drinking per week.
+ See \code{drinkfreqweek} on what constitutes a 'drink'.
+
+
+
+ }
+ \item{\code{drinkmaxday}}{a numeric vector,
+ in the last three months, what is the largest number of
+ drinks that you had on any one day?
+
+
+ }
+ \item{\code{pregnant}}{a factor,
+ have you ever been pregnant for more than 5 months?
+
+
+ }
+ \item{\code{pregfirst}}{a factor, if
+ yes to \code{pregnant}, how old were you when your first
+ baby was born (or you had a miscarriage after 5 months)?
+
+ }
+ \item{\code{preglast}}{a factor, how old were you when your last
+ baby was born (or you had a miscarriage after 5 months)?
+
+
+ }
+ \item{\code{babies}}{a factor,
+ how many babies have you given birth to?
+
+
+ }
+ \item{\code{mood}}{a numeric vector,
+ does your mood often go up or down?
+
+
+ }
+ \item{\code{miserab}}{a numeric vector,
+ do you ever feel 'just miserable' for no reason?
+
+
+ }
+ \item{\code{hurt}}{a numeric vector,
+ are your feelings easily hurt?
+
+ }
+ \item{\code{fedup}}{a numeric vector,
+ do you often feel 'fed up'?
+
+ }
+ \item{\code{nervous}}{a numeric vector,
+ would you call yourself a nervous person?
+
+ }
+ \item{\code{worrier}}{a numeric vector,
+ are you a worrier?
+
+ }
+ \item{\code{worry}}{a numeric vector,
+ do you worry about awful things that might happen?
+
+ }
+ \item{\code{tense}}{a numeric vector,
+ would you call yourself tense or 'highly strung'?
+
+ }
+ \item{\code{embarrass}}{a numeric vector,
+ do you worry too long after an embarrassing
+ experience?
+
+ }
+ \item{\code{nerves}}{a numeric vector,
+ do you suffer from 'nerves'?
+
+ }
+ \item{\code{friend}}{a numeric vector,
+ do you have a friend or family member that you
+ can talk to about problems or worries that you may have?
+
+ }
+ \item{\code{depress}}{a numeric vector,
+ in your lifetime, have you ever had two weeks or more
+ when nearly every day you felt sad or depressed?
+
+ }
+ \item{\code{exervig}}{a numeric vector,
+ how many hours per week would you do any vigorous
+ activity or exercise either at work or away from
+ work that makes you breathe hard and sweat?
+ Values here ought to be less than 168.
+
+
+ }
+ \item{\code{exermod}}{a numeric vector,
+ how many hours per week would you do any moderate
+ activity or exercise such as brisk walking, cycling or
+ mowing the lawn?
+ Values here ought to be less than 168.
+
+
+ }
+ \item{\code{hourfeet}}{a numeric vector,
+ on an average work day, how long would you spend on your
+ feet, either standing or moving about?
+
+
+ }
+ \item{\code{ethnic}}{a factor with 4 levels,
+ what ethnic group do you belong to?
+ 0 = European (NZ European or British or other European),
+ 1 = Maori,
+ 2 = Pacific Island Polynesian,
+ 3 = Other (Chinese, Indian, Other).
+
+
+ }
+ \item{\code{sleep}}{a numeric vector,
+ how many hours do you usually sleep each night?
+
+
+ }
+ \item{\code{snore}}{a factor with levels \code{0}, \code{1},
+ \code{D}.
+ Do you usually snore?
+
+
+ }
+ \item{\code{cat}}{a numeric vector,
+ do you have a household pet? If yes, is it a cat?
+
+ }
+ \item{\code{dog}}{a numeric vector,
+ do you have a household pet? If yes, is it a dog?
+
+
+ }
+ \item{\code{hand}}{a factor with levels
+ \code{0} = right,
+ \code{1} = left,
+ \code{2} = either.
+ Are you right-handed, left-handed, or no preference for left
+ or right?
+
+
+ }
+ \item{\code{nhouse}}{an ordered factor with 4 levels,
+ how many people (including yourself) usually live in your house?
+ 1 = 1, 2 = 2, 3 = 3, 4 = four or more.
+
+
+ }
+ \item{\code{marital}}{a factor with 4 levels:
+ \code{1} = single,
+ \code{2} = married or living with a partner,
+ \code{3} = separated or divorced,
+ \code{4} = widowed.
+
+
+ }
+ \item{\code{educ}}{an ordered factor with 4 levels.
+ What was the highest level of education you received?
+ Primary school = \code{1},
+ High school/secondary school = \code{2},
+ Polytechnic or similar = \code{3},
+ University = \code{4}.
+
+ }
+ }
+}
+\details{
+%% ~~ If necessary, more details than the __description__ above ~~
+
+The data frame is a subset of the entire data set which was
+collected from a confidential self-administered questionnaire
+administered in a large New Zealand workforce observational
+study conducted during 1992--3. The data were augmented
+by a second study consisting of retirees. The data can be
+considered a reasonable representation of the white male New
+Zealand population in the early 1990s. There were physical,
+lifestyle and psychological variables that were measured.
+The psychological variables were headed
+"Questions about your feelings".
+
+
+Although some data cleaning was performed and logic checks
+conducted, anomalies remain. Some variables, of course,
+are subject to a lot of measurement error and bias. It is
+conceivable that some participants had poor reading skills!
+
+
+}
+\source{
+
+ Originally,
+ Clinical Trials Research Unit, University of Auckland, New Zealand,
+ \url{http://www.ctru.auckland.ac.nz}.
+
+
+%% ~~ reference to a publication or URL from which the data were obtained ~~
+
+% MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
+% Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
+% Fletcher Challenge-University of Auckland Heart &
+% Health Study: design and baseline findings.
+% \emph{New Zealand Medical Journal},
+% \bold{108}, 499--502.
+
+
+
+}
+\references{
+
+ MacMahon, S., Norton, R., Jackson, R., Mackie, M. J.,
+ Cheng, A., Vander Hoorn, S., Milne, A., McCulloch, A. (1995)
+ Fletcher Challenge-University of Auckland Heart &
+ Health Study: design and baseline findings.
+ \emph{New Zealand Medical Journal},
+ \bold{108}, 499--502.
+
+}
+
+\seealso{
+ \code{\link{chest.nz}}.
+
+
+
+}
+\section{Warning }{
+ More variables may be added in the future and these
+ may be placed in any column position. Therefore
+ references such as \code{xs.nz[, 12]} are dangerous.
+
+
+}
+
+
+
+
+\examples{
+data(xs.nz)
+summary(xs.nz)
+
+# Handling of factors requires care
+is.factor(xs.nz$babies) # TRUE
+summary(xs.nz$babies) # Note the "-"s
+charbabies <- as.character(xs.nz$babies)
+summary(as.numeric(charbabies)) # "-"s converted to NAs + warning
+table(as.numeric(charbabies)) # Ditto
+}
+\keyword{datasets}
diff --git a/man/yip88.Rd b/man/yip88.Rd
index 06b1902..4934fb4 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -84,7 +84,7 @@ model.
The estimate of \eqn{\phi}{phi} is placed in the \code{misc} slot as
- \code{@misc$phi}. However, this estimate is computed only for intercept
+ \code{@misc$pstr0}. However, this estimate is computed only for intercept
models, i.e., the formula is of the form \code{y ~ 1}.
diff --git a/man/zabinomUC.Rd b/man/zabinomUC.Rd
new file mode 100644
index 0000000..f54cd20
--- /dev/null
+++ b/man/zabinomUC.Rd
@@ -0,0 +1,81 @@
+\name{Zabinom}
+\alias{Zabinom}
+\alias{dzabinom}
+\alias{pzabinom}
+\alias{qzabinom}
+\alias{rzabinom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Binomial Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the zero-altered binomial distribution with parameter \code{pobs0}.
+
+}
+\usage{
+dzabinom(x, size, prob, pobs0 = 0, log = FALSE)
+pzabinom(q, size, prob, pobs0 = 0)
+qzabinom(p, size, prob, pobs0 = 0)
+rzabinom(n, size, prob, pobs0 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+ }
+ \item{size, prob, log}{
+ Parameters from the ordinary binomial distribution
+ (see \code{\link[stats:Binomial]{dbinom}}).
+
+ }
+ \item{pobs0}{
+ Probability of (an observed) zero, called \eqn{pobs0}.
+ The default value of \code{pobs0 = 0} corresponds
+ to the response having a positive binomial distribution.
+
+ }
+}
+\details{
+ The probability function of \eqn{Y} is 0 with probability \code{pobs0},
+ else a positive
+ binomial(size, prob)
+ distribution.
+
+}
+\value{
+ \code{dzabinom} gives the density and
+ \code{pzabinom} gives the distribution function,
+ \code{qzabinom} gives the quantile function, and
+ \code{rzabinom} generates random deviates.
+
+}
+%\references{ }
+\author{ Thomas W. Yee }
+\note{
+ The argument \code{pobs0} is recycled to the required length, and
+ must have values which lie in the interval \eqn{[0,1]}.
+
+}
+
+\seealso{
+% \code{\link{zabinomial}},
+ \code{\link{zibinomial}},
+ \code{\link{rposbinom}}.
+
+}
+\examples{
+size <- 10; prob = 0.15; pobs0 <- 0.05; x <- (-1):7
+dzabinom(x, size = size, prob = prob, pobs0 = pobs0)
+table(rzabinom(100, size = size, prob = prob, pobs0 = pobs0))
+
+\dontrun{ x = 0:10
+barplot(rbind(dzabinom(x, size = size, prob = prob, pobs0 = pobs0),
+ dbinom(x, size = size, prob = prob)),
+ beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
+ ylab = "Probability", names.arg = as.character(x),
+ main = paste("ZAB(size = ", size, ", prob = ", prob, ", pobs0 = ", pobs0,
+ ") [blue] vs", " Binom(size = ", size, ", prob = ", prob,
+ ") [orange] densities", sep = "")) }
+}
+\keyword{distribution}
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
new file mode 100644
index 0000000..1d069b0
--- /dev/null
+++ b/man/zabinomial.Rd
@@ -0,0 +1,135 @@
+\name{zabinomial}
+\alias{zabinomial}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Binomial Distribution }
+\description{
+ Fits a zero-altered binomial distribution based on
+ a conditional model involving a Bernoulli distribution and a
+ positive-binomial distribution.
+
+}
+\usage{
+zabinomial(lprob = "logit", eprob = list(),
+ lpobs0 = "logit", epobs0 = list(),
+ iprob = NULL, ipobs0 = NULL,
+ imethod = 1, zero = 2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lprob}{
+ Parameter link function applied to the probability parameter
+ of the binomial distribution.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lpobs0}{
+ Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{eprob, epobs0}{
+ List. Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{iprob, ipobs0}{
+ \code{\link{CommonVGAMffArguments}}.
+
+ }
+ \item{imethod, zero}{
+ See
+ \code{\link{CommonVGAMffArguments}}.
+
+ }
+}
+
+\details{
+ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0},
+ else \eqn{Y} has a positive-binomial distribution with
+ probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1},
+ which may be modelled as a function of the covariates.
+ The zero-altered binomial distribution differs from the
+ zero-inflated binomial distribution in that the former
+ has zeros coming from one source, whereas the latter
+ has zeros coming from the binomial distribution too. The
+ zero-inflated binomial distribution is implemented in
+ \code{\link{zibinomial}}.
+ Some people call the zero-altered binomial a \emph{hurdle} model.
+
+
+ The input is currently a vector or one-column matrix.
+ By default, the two linear/additive
+ predictors are \eqn{(\log(p), logit(p_0))^T}{(log(prob), logit(pobs0))^T}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+ The \code{fitted.values} slot of the fitted object,
+ which should be extracted by the generic function \code{fitted}, returns
+ the mean \eqn{\mu}{mu} which is given by
+ \deqn{\mu = (1-p_0) \mu_{b} / [1 - (1 - \mu_{b})^N]}{%
+ mu = (1-pobs0) * mub / [1 - (1 - mub)^N]}
+ where \eqn{\mu_{b}}{mub} is the usual binomial mean.
+
+}
+%\references{
+%
+%
+%}
+%\section{Warning }{
+%
+%}
+
+\author{ T. W. Yee }
+\note{
+
+ The response should be a two-column matrix of counts,
+ with first column giving the number of successes.
+
+
+ Note this family function allows \eqn{p_0}{pobs0} to be modelled as
+ functions of the covariates by having \code{zero = NULL}.
+ It is a conditional model, not a mixture model.
+
+
+ This family function effectively combines
+ \code{\link{posbinomial}} and \code{\link{binomialff}} into
+ one family function.
+
+}
+
+\seealso{
+ \code{\link{dzabinom}},
+ \code{\link{zibinomial}},
+ \code{\link{posbinomial}},
+ \code{\link{binomialff}},
+ \code{\link[stats:Binomial]{dbinom}},
+ \code{\link{CommonVGAMffArguments}}.
+
+}
+
+\examples{
+zdata <- data.frame(x2 = runif(nn <- 1000))
+zdata <- transform(zdata,
+ size = 10,
+ prob = logit(-2 + 3*x2, inverse = TRUE),
+ pobs0 = logit(-1 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata,
+ y1 = rzabinom(nn, size = size, prob = prob, pobs0 = pobs0))
+with(zdata, table(y1))
+
+fit <- vglm(cbind(y1, size - y1) ~ x2, zabinomial(zero = NULL),
+ zdata, trace = TRUE)
+coef(fit, matrix = TRUE)
+head(fitted(fit))
+head(predict(fit))
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zageomUC.Rd b/man/zageomUC.Rd
new file mode 100644
index 0000000..1e89c9e
--- /dev/null
+++ b/man/zageomUC.Rd
@@ -0,0 +1,81 @@
+\name{Zageom}
+\alias{Zageom}
+\alias{dzageom}
+\alias{pzageom}
+\alias{qzageom}
+\alias{rzageom}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Geometric Distribution }
+\description{
+ Density, distribution function, quantile function and random generation
+ for the zero-altered geometric distribution with parameter \code{pobs0}.
+
+}
+\usage{
+dzageom(x, prob, pobs0 = 0, log = FALSE)
+pzageom(q, prob, pobs0 = 0)
+qzageom(p, prob, pobs0 = 0)
+rzageom(n, prob, pobs0 = 0)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations.
+ If \code{length(n) > 1} then the length is taken to be the number required.
+ }
+ \item{prob, log}{
+ Parameters from the ordinary geometric distribution
+ (see \code{\link[stats:Geometric]{dgeom}}).
+
+ }
+ \item{pobs0}{
+ Probability of (an observed) zero, called \eqn{pobs0}.
+ The default value of \code{pobs0 = 0} corresponds
+ to the response having a positive geometric distribution.
+
+ }
+}
+\details{
+ The probability function of \eqn{Y} is 0 with probability \code{pobs0},
+ else a positive
+ geometric(prob)
+ distribution.
+
+}
+\value{
+ \code{dzageom} gives the density and
+ \code{pzageom} gives the distribution function,
+ \code{qzageom} gives the quantile function, and
+ \code{rzageom} generates random deviates.
+
+}
+%\references{ }
+\author{ Thomas W. Yee }
+\note{
+ The argument \code{pobs0} is recycled to the required length, and
+ must have values which lie in the interval \eqn{[0,1]}.
+
+}
+
+\seealso{
+ \code{\link{zageometric}},
+ \code{\link{zigeometric}},
+ \code{\link{rposgeom}}.
+
+}
+\examples{
+prob = 0.35; pobs0 <- 0.05; x <- (-1):7
+dzageom(x, prob = prob, pobs0 = pobs0)
+table(rzageom(100, prob = prob, pobs0 = pobs0))
+
+\dontrun{ x = 0:10
+barplot(rbind(dzageom(x, prob = prob, pobs0 = pobs0),
+ dgeom(x, prob = prob)),
+ beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
+ ylab = "Probability", names.arg = as.character(x),
+ main = paste("ZAG(prob = ", prob, ", pobs0 = ", pobs0,
+ ") [blue] vs", " Geometric(prob = ", prob,
+ ") [orange] densities", sep = "")) }
+}
+\keyword{distribution}
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
new file mode 100644
index 0000000..303adf7
--- /dev/null
+++ b/man/zageometric.Rd
@@ -0,0 +1,142 @@
+\name{zageometric}
+\alias{zageometric}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Zero-Altered Geometric Distribution }
+\description{
+ Fits a zero-altered geometric distribution based on
+ a conditional model involving a Bernoulli distribution and a
+ positive-geometric distribution.
+
+}
+\usage{
+zageometric(lpobs0 = "logit", lprob = "logit",
+ epobs0 = list(), eprob = list(),
+ imethod = 1, ipobs0 = NULL, iprob = NULL, zero = NULL)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lpobs0}{
+ Link function for the parameter \eqn{p_0}{pobs0} or \eqn{\phi}{phi},
+ called \code{pobs0} or \code{phi} here.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{lprob}{
+ Parameter link function applied to the probability of success,
+ called \code{prob}
+ or \eqn{p}.
+ See \code{\link{Links}} for more choices.
+
+ }
+ \item{epobs0, eprob}{
+ List. Extra argument for the respective links.
+ See \code{earg} in \code{\link{Links}} for general information.
+
+ }
+ \item{ipobs0, iprob}{
+ Optional initial values for the parameters.
+ If given, they must be in range.
+ For multi-column responses, these are recycled sideways.
+
+ }
+ \item{zero, imethod}{
+ See
+ \code{\link{CommonVGAMffArguments}}.
+
+ }
+}
+
+\details{
+ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0},
+ or \eqn{Y} has a positive-geometric distribution with
+ probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1},
+ which is modelled as a function of the covariates. The zero-altered
+ geometric distribution differs from the zero-inflated
+ geometric distribution in that the former has zeros coming from one
+ source, whereas the latter has zeros coming from the geometric
+ distribution too. The zero-inflated geometric distribution
+ is implemented in the \pkg{VGAM} package. Some people
+ call the zero-altered geometric a \emph{hurdle} model.
+
+ The input can be a matrix.
+ By default, the two linear/additive
+ predictors are \eqn{(logit(\phi), logit(p))^T}{(logit(phi), logit(prob))^T}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+ The \code{fitted.values} slot of the fitted object,
+ which should be extracted by the generic function \code{fitted}, returns
+ the mean \eqn{\mu}{mu} which is given by
+ \deqn{\mu = (1-\phi) / p.}{%
+ mu = (1- phi) / p.}
+
+}
+%\references{
+%
+%
+%}
+\section{Warning }{
+ Convergence for this \pkg{VGAM} family function seems to depend quite
+ strongly on providing good initial values.
+
+
+ Inference obtained from \code{summary.vglm} and \code{summary.vgam}
+ may or may not be correct. In particular, the p-values, standard errors
+ and degrees of freedom may need adjustment. Use simulation on artificial
+ data to check that these are reasonable.
+
+
+}
+
+\author{ T. W. Yee }
+\note{
+
+ Note this family function allows \eqn{p_0}{pobs0} to be modelled as
+ a function of the covariates. It is a conditional model, not a mixture
+ model.
+
+
+ This family function effectively combines
+ \code{\link{binomialff}} and
+ \code{posgeometric()} and \code{\link{geometric}} into
+ one family function.
+ However, \code{posgeometric()} is not written because it
+ is trivially related to \code{\link{geometric}}.
+
+}
+
+\seealso{
+ \code{\link{dzageom}},
+% \code{\link{posgeometric}},
+ \code{\link{geometric}},
+ \code{\link{zigeometric}},
+ \code{\link[stats:Geometric]{dgeom}},
+ \code{\link{CommonVGAMffArguments}}.
+
+}
+
+\examples{
+zdata <- data.frame(x2 = runif(nn <- 1000))
+zdata <- transform(zdata,
+ pobs0 = logit(-1 + 2*x2, inverse = TRUE),
+ prob = logit(-2 + 3*x2, inverse = TRUE))
+zdata <- transform(zdata,
+ y1 = rzageom(nn, prob = prob, pobs0 = pobs0))
+with(zdata, table(y1))
+
+fit <- vglm(y1 ~ x2, zageometric, zdata, trace = TRUE)
+coef(fit, matrix = TRUE)
+head(fitted(fit))
+head(predict(fit))
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index ced39b5..dcef65f 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -8,14 +8,14 @@
\title{ Zero-Altered Negative Binomial Distribution }
\description{
Density, distribution function, quantile function and random generation
- for the zero-altered negative binomial distribution with parameter \code{p0}.
+ for the zero-altered negative binomial distribution with parameter \code{pobs0}.
}
\usage{
-dzanegbin(x, p0, size, prob = NULL, munb = NULL, log = FALSE)
-pzanegbin(q, p0, size, prob = NULL, munb = NULL)
-qzanegbin(p, p0, size, prob = NULL, munb = NULL)
-rzanegbin(n, p0, size, prob = NULL, munb = NULL)
+dzanegbin(x, size, prob = NULL, munb = NULL, pobs0 = 0, log = FALSE)
+pzanegbin(q, size, prob = NULL, munb = NULL, pobs0 = 0)
+qzanegbin(p, size, prob = NULL, munb = NULL, pobs0 = 0)
+rzanegbin(n, size, prob = NULL, munb = NULL, pobs0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -29,15 +29,15 @@ rzanegbin(n, p0, size, prob = NULL, munb = NULL)
Some arguments have been renamed slightly.
}
- \item{p0}{
- Probability of zero, called \eqn{p0}.
- The default value of \code{p0 = 0} corresponds
+ \item{pobs0}{
+ Probability of zero, called \eqn{pobs0}.
+ The default value of \code{pobs0 = 0} corresponds
to the response having a positive negative binomial distribution.
}
}
\details{
- The probability function of \eqn{Y} is 0 with probability \code{p0},
+ The probability function of \eqn{Y} is 0 with probability \code{pobs0},
else a positive
negative binomial(\eqn{\mu_{nb}}{munb}, size)
distribution.
@@ -53,7 +53,7 @@ rzanegbin(n, p0, size, prob = NULL, munb = NULL)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{p0} is recycled to the required length, and
+ The argument \code{pobs0} is recycled to the required length, and
must have values which lie in the interval \eqn{[0,1]}.
}
@@ -64,17 +64,18 @@ rzanegbin(n, p0, size, prob = NULL, munb = NULL)
}
\examples{
-munb <- 3; size <- 4; p0 <- 0.3; x <- (-1):7
-dzanegbin(x, p0 = p0, munb = munb, size = size)
-table(rzanegbin(100, p0 = p0, munb = munb, size = size))
+munb <- 3; size <- 4; pobs0 <- 0.3; x <- (-1):7
+dzanegbin(x, munb = munb, size = size, pobs0 = pobs0)
+table(rzanegbin(100, munb = munb, size = size, pobs0 = pobs0))
\dontrun{ x = 0:10
-barplot(rbind(dzanegbin(x, p0 = p0, munb = munb, size = size),
- dnbinom(x, mu = munb, size = size)),
+barplot(rbind(dzanegbin(x, munb = munb, size = size, pobs0 = pobs0),
+ dnbinom(x, mu = munb, size = size)),
beside = TRUE, col = c("blue","green"), cex.main = 0.7, las = 1,
ylab = "Probability",names.arg = as.character(x),
- main = paste("ZANB(p0 =", p0, ", munb =", munb, ", size =", size,
- ") [blue] vs", " NB(mu =", munb, ", size =", size,
- ") [green] densities", sep="")) }
+ main = paste("ZANB(munb = ", munb, ", size = ", size,",
+ pobs0 = ", pobs0,
+ ") [blue] vs", " NB(mu = ", munb, ", size = ", size,
+ ") [green] densities", sep = "")) }
}
\keyword{distribution}
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index bc52cdd..2d778f5 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -9,15 +9,16 @@
}
\usage{
-zanegbinomial(lp0 = "logit", lmunb = "loge", lsize = "loge",
- ep0 = list(), emunb = list(), esize = list(), ipnb0 = NULL,
- isize = NULL, zero = -3, cutoff = 0.995, imethod = 1,
- shrinkage.init = 0.95)
+zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
+ epobs0 = list(), emunb = list(), esize = list(),
+ ipobs0 = NULL, isize = NULL,
+ zero = c(-1, -3), imethod = 1,
+ nsimEIM = 250, shrinkage.init = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lp0}{
- Link function for the parameter \eqn{p_0}{p0}, called \code{p0} here.
+ \item{lpobs0}{
+ Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here.
See \code{\link{Links}} for more choices.
}
@@ -34,44 +35,38 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lsize = "loge",
See \code{\link{Links}} for more choices.
}
- \item{ep0, emunb, esize}{
+ \item{epobs0, emunb, esize}{
List. Extra argument for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{ipnb0}{
- Optional initial values for the probability \eqn{p_0}{p0}.
- If given, they must be in \eqn{(0,1)},
- and it is ok to give one value each for each response/species.
-
- }
- \item{isize}{
- Optional initial values for \code{k}.
- If given, they must be positive, and give one value
- for each response/species.
+ \item{ipobs0, isize}{
+ Optional initial values for \eqn{p_0}{pobs0} and \code{k}.
+ If given, it is okay to give one value
+ for each response/species by inputting a vector whose length
+ is the number of columns of the response matrix.
}
\item{zero}{
- Integer valued vector, usually assigned \eqn{-3} or \eqn{3} if
- used at all. Specifies which of the three linear predictors are
- modelled as an intercept only. By default, the \code{k} parameter
- (after \code{lsize} is applied) for each response is modelled as
- a single unknown number that is estimated. It can be modelled as a
+ Integer valued vector, may be assigned, e.g., \eqn{-3} or \eqn{3} if
+ the probability of an observed value is to be modelled with the
+ covariates.
+ Specifies which of the three linear predictors are
+ modelled as an intercept only. By default, the \code{k} and \eqn{p_0}{pobs0}
+ parameters for each response are modelled as
+ single unknown numbers that are estimated.
+ All parameters can be modelled as a
function of the explanatory variables by setting \code{zero = NULL}.
A negative value means that the value is recycled, so setting \eqn{-3}
means all \code{k} are intercept-only.
See \code{\link{CommonVGAMffArguments}} for more information.
}
- \item{cutoff}{
- A numeric which is close to 1 but never exactly 1. Used to
- specify how many terms of the infinite series are actually used.
- The sum of the probabilites are added until they reach this value
- or more. It is like specifying \code{p} in an imaginary function
- \code{qnegbin(p)}.
+ \item{nsimEIM, imethod}{
+ See \code{\link{CommonVGAMffArguments}}.
}
- \item{imethod, shrinkage.init}{
+ \item{shrinkage.init}{
See \code{\link{negbinomial}}
and \code{\link{CommonVGAMffArguments}}.
@@ -79,9 +74,9 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lsize = "loge",
}
\details{
- The response \eqn{Y} is zero with probability \eqn{p_0}{p0},
+ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0},
or \eqn{Y} has a positive-negative binomial distribution with
- probability \eqn{1-p_0}{1-p0}. Thus \eqn{0 < p_0 < 1}{0 < p0 < 1},
+ probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0 < p_0 < 1}{0 < pobs0 < 1},
which is modelled as a function of the covariates. The zero-altered
negative binomial distribution differs from the zero-inflated negative
binomial distribution in that the former has zeros coming from one
@@ -92,7 +87,7 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lsize = "loge",
For one response/species, by default, the three linear/additive
- predictors are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(p0),
+ predictors are \eqn{(logit(p_0), \log(\mu_{nb}), \log(k))^T}{(logit(pobs0),
log(munb), log(k))^T}. This vector is recycled for multiple species.
@@ -107,7 +102,7 @@ zanegbinomial(lp0 = "logit", lmunb = "loge", lsize = "loge",
which should be extracted by the generic function \code{fitted}, returns
the mean \eqn{\mu}{mu} which is given by
\deqn{\mu = (1-p_0) \mu_{nb} / [1 - (k/(k+\mu_{nb}))^k].}{%
- mu = (1-p0) * munb / [1 - (k/(k+munb))^k].}
+ mu = (1-pobs0) * munb / [1 - (k/(k+munb))^k].}
}
\references{
@@ -137,9 +132,10 @@ for counts with extra zeros.
\author{ T. W. Yee }
\note{
- Note this family function allows \eqn{p_0}{p0} to be modelled as
- functions of the covariates. It is a conditional model, not a mixture
- model.
+ Note this family function allows \eqn{p_0}{pobs0} to be modelled as
+ functions of the covariates provided \code{zero} is set correctly.
+ It is a conditional model, not a mixture model.
+ Simulated Fisher scoring is the algorithm.
This family function effectively combines
@@ -167,17 +163,15 @@ for counts with extra zeros.
}
\examples{
-zdata <- data.frame(x = runif(nn <- 2000))
+zdata <- data.frame(x2 = runif(nn <- 2000))
+zdata <- transform(zdata, pobs0 = logit(-1 + 2*x2, inverse = TRUE))
zdata <- transform(zdata,
- p0 = logit(-1 + 2*x, inverse = TRUE),
- y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
- y2 = rposnegbin(nn, munb = exp(1+2*x), size = exp(1)))
-zdata <- transform(zdata, y1 = ifelse(runif(nn) < p0, 0, y1),
- y2 = ifelse(runif(nn) < p0, 0, y2))
+ y1 = rzanegbin(nn, munb = exp(0+2*x2), size = exp(1), pobs0 = pobs0),
+ y2 = rzanegbin(nn, munb = exp(1+2*x2), size = exp(1), pobs0 = pobs0))
with(zdata, table(y1))
with(zdata, table(y2))
-fit <- vglm(cbind(y1, y2) ~ x, zanegbinomial, zdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ x2, zanegbinomial, zdata, trace = TRUE)
coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index 1443de9..49717ba 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -8,14 +8,14 @@
\title{ Zero-Altered Poisson Distribution }
\description{
Density, distribution function, quantile function and random generation
- for the zero-altered Poisson distribution with parameter \code{p0}.
+ for the zero-altered Poisson distribution with parameter \code{pobs0}.
}
\usage{
-dzapois(x, lambda, p0 = 0, log = FALSE)
-pzapois(q, lambda, p0 = 0)
-qzapois(p, lambda, p0 = 0)
-rzapois(n, lambda, p0 = 0)
+dzapois(x, lambda, pobs0 = 0, log = FALSE)
+pzapois(q, lambda, pobs0 = 0)
+qzapois(p, lambda, pobs0 = 0)
+rzapois(n, lambda, pobs0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,9 +24,9 @@ rzapois(n, lambda, p0 = 0)
\item{n}{number of observations.
If \code{length(n) > 1} then the length is taken to be the number required. }
\item{lambda}{ Vector of positive means. }
- \item{p0}{
- Probability of zero, called \eqn{p0}.
- The default value of \code{p0 = 0} corresponds
+ \item{pobs0}{
+ Probability of zero, called \eqn{pobs0}.
+ The default value of \code{pobs0 = 0} corresponds
to the response having a positive Poisson distribution.
}
@@ -34,7 +34,7 @@ rzapois(n, lambda, p0 = 0)
}
\details{
The probability function of \eqn{Y} is 0 with probability
- \code{p0}, else a positive \eqn{Poisson(\lambda)}{Poisson(lambda)}.
+ \code{pobs0}, else a positive \eqn{Poisson(\lambda)}{Poisson(lambda)}.
}
\value{
@@ -46,7 +46,7 @@ rzapois(n, lambda, p0 = 0)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{p0} is recycled to the required length, and
+ The argument \code{pobs0} is recycled to the required length, and
must have values which lie in the interval \eqn{[0,1]}.
}
@@ -56,17 +56,17 @@ rzapois(n, lambda, p0 = 0)
}
\examples{
-lambda = 3; p0 = 0.2; x = (-1):7
-(ii = dzapois(x, lambda, p0))
-max(abs(cumsum(ii) - pzapois(x, lambda, p0))) # Should be 0
-table(rzapois(100, lambda, p0))
-table(qzapois(runif(100), lambda, p0))
-round(dzapois(0:10, lambda, p0) * 100) # Should be similar
+lambda = 3; pobs0 = 0.2; x = (-1):7
+(ii = dzapois(x, lambda, pobs0))
+max(abs(cumsum(ii) - pzapois(x, lambda, pobs0))) # Should be 0
+table(rzapois(100, lambda, pobs0))
+table(qzapois(runif(100), lambda, pobs0))
+round(dzapois(0:10, lambda, pobs0) * 100) # Should be similar
\dontrun{ x = 0:10
-barplot(rbind(dzapois(x, lambda, p0), dpois(x, lambda)),
+barplot(rbind(dzapois(x, lambda, pobs0), dpois(x, lambda)),
beside = TRUE, col = c("blue", "green"), las = 1,
- main = paste("ZAP(", lambda, ", p0 = ", p0, ") [blue] vs",
+ main = paste("ZAP(", lambda, ", pobs0 = ", pobs0, ") [blue] vs",
" Poisson(", lambda, ") [green] densities", sep = ""),
names.arg = as.character(x), ylab = "Probability") }
}
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index 957a18c..ca03364 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -4,18 +4,18 @@
\title{ Zero-Altered Poisson Distribution }
\description{
Fits a zero-altered Poisson distribution based on a conditional
- model involving a binomial distribution
+ model involving a Bernoulli distribution
and a positive-Poisson distribution.
}
\usage{
-zapoisson(lp0 = "logit", llambda = "loge", ep0 = list(),
+zapoisson(lpobs0 = "logit", llambda = "loge", epobs0 = list(),
elambda = list(), zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lp0}{
- Link function for the parameter \eqn{p_0}{p0}, called \code{p0} here.
+ \item{lpobs0}{
+ Link function for the parameter \eqn{p_0}{pobs0}, called \code{pobs0} here.
See \code{\link{Links}} for more choices.
}
@@ -24,40 +24,45 @@ zapoisson(lp0 = "logit", llambda = "loge", ep0 = list(),
See \code{\link{Links}} for more choices.
}
- \item{ep0, elambda}{
+ \item{epobs0, elambda}{
Extra argument for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
}
\item{zero}{
- Integer valued vector, usually assigned \eqn{-1} or \eqn{1} if used
- at all. Specifies which of the two linear/additive predictors are
- modelled as an intercept only.
- By default, both linear/additive predictors are modelled using
- the explanatory variables.
- If \code{zero = 1} then the \eqn{p_0}{p0} parameter
- (after \code{lp0} is applied) is modelled as a single unknown
- number that is estimated. It is modelled as a function of the
- explanatory variables by \code{zero = NULL}. A negative value
- means that the value is recycled, so setting \eqn{-1} means
- all \eqn{p_0}{p0} are intercept-only (for multivariate responses).
See \code{\link{CommonVGAMffArguments}} for more information.
+% Integer valued vector, usually assigned \eqn{-1} or \eqn{1} if used
+% at all. Specifies which of the two linear/additive predictors are
+% modelled as an intercept only.
+% By default, both linear/additive predictors are modelled using
+% the explanatory variables.
+% If \code{zero = 1} then the \eqn{p_0}{pobs0} parameter
+% (after \code{lpobs0} is applied) is modelled as a single unknown
+% number that is estimated. It is modelled as a function of the
+% explanatory variables by \code{zero = NULL}. A negative value
+% means that the value is recycled, so setting \eqn{-1} means
+% all \eqn{p_0}{pobs0} are intercept-only (for multivariate responses).
+
+
}
}
\details{
- The response \eqn{Y} is zero with probability \eqn{p_0}{p0}, or \eqn{Y}
- has a positive-Poisson(\eqn{\lambda)}{lambda)} distribution with
- probability \eqn{1-p_0}{1-p0}. Thus \eqn{0 < p_0 < 1}{0 < p0 < 1},
- which is modelled as a function of the covariates. The zero-altered
- Poisson distribution differs from the zero-inflated Poisson distribution
- in that the former has zeros coming from one source, whereas the latter
- has zeros coming from the Poisson distribution too. Some people call
- the zero-altered Poisson a \emph{hurdle} model.
+ The response \eqn{Y} is zero with probability \eqn{p_0}{pobs0},
+ else \eqn{Y} has a positive-Poisson(\eqn{\lambda)}{lambda)}
+ distribution with probability \eqn{1-p_0}{1-pobs0}. Thus \eqn{0
+ < p_0 < 1}{0 < pobs0 < 1}, which is modelled as a function of
+ the covariates. The zero-altered Poisson distribution differs
+ from the zero-inflated Poisson distribution in that the former
+ has zeros coming from one source, whereas the latter has zeros
+ coming from the Poisson distribution too. Some people call the
+ zero-altered Poisson a \emph{hurdle} model.
- For one response/species, by default, the two linear/additive predictors
- are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(p0), log(lambda))^T}.
+ For one response/species, by default, the two linear/additive
+ predictors are \eqn{(logit(p_0), \log(\lambda))^T}{(logit(pobs0),
+ log(lambda))^T}.
+ Fisher scoring is implemented.
}
@@ -68,16 +73,16 @@ zapoisson(lp0 = "logit", llambda = "loge", ep0 = list(),
The \code{fitted.values} slot of the fitted object,
- which should be extracted by the generic function \code{fitted}, returns
- the mean \eqn{\mu}{mu} which is given by
+ which should be extracted by the generic function \code{fitted},
+ returns the mean \eqn{\mu}{mu} which is given by
\deqn{\mu = (1-p_0) \lambda / [1 - \exp(-\lambda)].}{%
- mu = (1-p0) * lambda / [1 - exp(-lambda)].}
+ mu = (1-pobs0) * lambda / [1 - exp(-lambda)].}
}
\references{
-Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and Lindenmayer,
-D. B. (1996)
+Welsh, A. H., Cunningham, R. B., Donnelly, C. F. and
+Lindenmayer, D. B. (1996)
Modelling the abundances of rare species: statistical models
for counts with extra zeros.
\emph{Ecological Modelling},
@@ -86,8 +91,7 @@ for counts with extra zeros.
Angers, J-F. and Biswas, A. (2003)
-A Bayesian analysis of zero-inflated generalized Poisson
-model.
+A Bayesian analysis of zero-inflated generalized Poisson model.
\emph{Computational Statistics & Data Analysis},
\bold{42}, 37--46.
@@ -98,35 +102,39 @@ contains further information and examples.
}
-\section{Warning }{
- Inference obtained from \code{summary.vglm}
- and \code{summary.vgam} may or may not be correct.
- In particular, the p-values, standard errors and degrees of
- freedom may need adjustment. Use simulation on artificial
- data to check that these are reasonable.
-}
+
+%20111123; this has been fixed up with proper FS using EIM.
+%\section{Warning }{
+% Inference obtained from \code{summary.vglm}
+% and \code{summary.vgam} may or may not be correct.
+% In particular, the p-values, standard errors and degrees of
+% freedom may need adjustment. Use simulation on artificial
+% data to check that these are reasonable.
+%
+%
+%}
+
\author{ T. W. Yee }
\note{
There are subtle differences between this family function and
\code{\link{zipoisson}} and \code{\link{yip88}}.
In particular, \code{\link{zipoisson}} is a
- \emph{mixture} model whereas \code{zapoisson} and \code{\link{yip88}}
+ \emph{mixture} model whereas \code{zapoisson()} and \code{\link{yip88}}
are \emph{conditional} models.
- Note this family function allows \eqn{p_0}{p0} to be modelled
- as functions of the covariates. It can be thought of an extension
- of \code{\link{yip88}}, which is also a conditional model but its
- \eqn{\phi}{phi} parameter is a scalar only.
+ Note this family function allows \eqn{p_0}{pobs0} to be modelled
+ as functions of the covariates.
+% It can be thought of an extension
+% of \code{\link{yip88}}, which is also a conditional model but its
+% \eqn{\phi}{phi} parameter is a scalar only.
This family function effectively combines \code{\link{pospoisson}}
and \code{\link{binomialff}} into one family function.
-
-
This family function can handle a multivariate response,
e.g., more than one species.
@@ -146,17 +154,18 @@ contains further information and examples.
\examples{
zapdata <- data.frame(x2 = runif(nn <- 1000))
-zapdata <- transform(zapdata, p0 = logit( -1 + 1*x2, inverse = TRUE),
+zapdata <- transform(zapdata, pobs0 = logit( -1 + 1*x2, inverse = TRUE),
lambda = loge(-0.5 + 2*x2, inverse = TRUE))
-zapdata <- transform(zapdata, y = rzapois(nn, lambda, p0 = p0))
+zapdata <- transform(zapdata, y = rzapois(nn, lambda, pobs0 = pobs0))
with(zapdata, table(y))
fit <- vglm(y ~ x2, zapoisson, zapdata, trace = TRUE)
-fit <- vglm(y ~ x2, zapoisson, zapdata, trace = TRUE, crit = "c")
+fit <- vglm(y ~ x2, zapoisson, zapdata, trace = TRUE, crit = "coef")
head(fitted(fit))
head(predict(fit))
head(predict(fit, untransform = TRUE))
coef(fit, matrix = TRUE)
+summary(fit)
# Another example ------------------------------
@@ -164,9 +173,9 @@ coef(fit, matrix = TRUE)
abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
abdata <- subset(abdata, w > 0)
yy <- with(abdata, rep(y, w))
-fit3 <- vglm(yy ~ 1, zapoisson, trace = TRUE, crit = "c")
+fit3 <- vglm(yy ~ 1, zapoisson, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
-Coef(fit3) # Estimate of lambda (they get 0.6997 with standard error 0.1520)
+Coef(fit3) # Estimate lambda (they get 0.6997 with SE 0.1520)
head(fitted(fit3), 1)
mean(yy) # compare this with fitted(fit3)
}
diff --git a/man/zero.Rd b/man/zero.Rd
index a82feeb..7d8997c 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -101,7 +101,7 @@ args(binom2.or)
args(gpd)
#LMS quantile regression example
-fit = vglm(BMI ~ bs(age, df = 4), lms.bcg(zero = c(1,3)), bminz, trace = TRUE)
+fit = vglm(BMI ~ bs(age, df = 4), lms.bcg(zero = c(1,3)), bmi.nz, trace = TRUE)
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index 82f0d56..bdfd276 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -9,14 +9,14 @@
\description{
Density, distribution function, quantile function and random
generation for the zero-inflated binomial distribution with
- parameter \code{phi}.
+ parameter \code{pstr0}.
}
\usage{
-dzibinom(x, size, prob, log = FALSE, phi = 0)
-pzibinom(q, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0)
-qzibinom(p, size, prob, lower.tail = TRUE, log.p = FALSE, phi = 0)
-rzibinom(n, size, prob, phi = 0)
+dzibinom(x, size, prob, pstr0 = 0, log = FALSE)
+pzibinom(q, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
+qzibinom(p, size, prob, pstr0 = 0, lower.tail = TRUE, log.p = FALSE)
+rzibinom(n, size, prob, pstr0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -28,11 +28,11 @@ rzibinom(n, size, prob, phi = 0)
\item{n}{number of observations. Must be a single positive integer. }
\item{log, log.p, lower.tail}{ Arguments that are passed on to
\code{\link[stats:Binomial]{pbinom}}.}
- \item{phi}{
- Probability of zero (ignoring the binomial distribution), called
- \eqn{\phi}{phi}. The default value of \eqn{\phi=0}{phi=0}
- corresponds to the response having an ordinary binomial
- distribution.
+ \item{pstr0}{
+ Probability of a structural zero (i.e., ignoring the binomial distribution),
+ called \eqn{\phi}{phi}.
+ The default value of \eqn{\phi=0}{phi=0} corresponds to
+ the response having an ordinary binomial distribution.
}
}
@@ -57,8 +57,16 @@ rzibinom(n, size, prob, phi = 0)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{phi} is recycled to the required length,
- and must have values which lie in the interval \eqn{[0,1]}.
+ The argument \code{pstr0} is recycled to the required length,
+ and must have values which lie in the interval \eqn{[0,1]}.
+
+
+ These functions actually allow for \emph{zero-deflation}.
+ That is, the resulting probability of a zero count
+ is \emph{less than} the nominal value of the parent
+ distribution.
+ See \code{\link{Zipois}} for more information.
+
}
@@ -68,19 +76,19 @@ rzibinom(n, size, prob, phi = 0)
}
\examples{
-prob = 0.2; size = 10; phi = 0.5
-(ii = dzibinom(0:size, size, prob, phi = phi))
-max(abs(cumsum(ii) - pzibinom(0:size, size, prob, phi = phi))) # Should be 0
-table(rzibinom(100, size, prob, phi = phi))
+prob = 0.2; size = 10; pstr0 = 0.5
+(ii = dzibinom(0:size, size, prob, pstr0 = pstr0))
+max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0))) # Should be 0
+table(rzibinom(100, size, prob, pstr0 = pstr0))
-table(qzibinom(runif(100), size, prob, phi = phi))
-round(dzibinom(0:10, size, prob, phi = phi) * 100) # Should be similar
+table(qzibinom(runif(100), size, prob, pstr0 = pstr0))
+round(dzibinom(0:10, size, prob, pstr0 = pstr0) * 100) # Should be similar
\dontrun{ x = 0:size
-barplot(rbind(dzibinom(x, size, prob, phi = phi),
+barplot(rbind(dzibinom(x, size, prob, pstr0 = pstr0),
dbinom(x, size, prob)),
beside = TRUE, col = c("blue", "green"), ylab = "Probability",
- main = paste("ZIB(", size, ", ", prob, ", phi = ", phi, ") (blue) vs",
+ main = paste("ZIB(", size, ", ", prob, ", pstr0 = ", pstr0, ") (blue) vs",
" Binomial(", size, ", ", prob, ") (green)", sep=""),
names.arg = as.character(x), las = 1, lwd = 2) }
}
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index b935e1a..f10bd40 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -8,24 +8,26 @@
}
\usage{
-zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
- iphi = NULL, zero = 1, mv = FALSE, imethod = 1)
+zibinomial(lpstr0 = "logit", lprob = "logit",
+ epstr0 = list(), eprob = list(),
+ ipstr0 = NULL, zero = 1, mv = FALSE, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lphi, lmu}{
+ \item{lpstr0, lprob}{
Link functions for the parameter \eqn{\phi}{phi}
- and the usual binomial probability \eqn{\mu}{mu} parameter.
+ and the usual binomial probability \eqn{\mu}{prob} parameter.
See \code{\link{Links}} for more choices.
+ For the zero-\emph{deflated} model see below.
}
- \item{ephi, emu}{
+ \item{epstr0, eprob}{
List. Extra argument for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{iphi}{
+ \item{ipstr0}{
Optional initial values for \eqn{\phi}{phi}, whose values must lie
between 0 and 1. The default is to compute an initial value internally.
If a vector then recyling is used.
@@ -35,7 +37,7 @@ zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
% An integer specifying which linear/additive predictor is modelled
% as intercepts only. If given, the value must be either 1 or 2,
% and the default is the first. Setting \code{zero = NULL} enables both
-% \eqn{\phi}{phi} and \eqn{\mu}{mu} to be modelled as a function of
+% \eqn{\phi}{phi} and \eqn{\mu}{prob} to be modelled as a function of
% the explanatory variables.
% See \code{\link{CommonVGAMffArguments}} for more information.
@@ -54,18 +56,19 @@ zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
\details{
This function uses Fisher scoring and is based on
\deqn{P(Y=0) = \phi + (1-\phi) (1-\mu)^N,}{%
- P(Y=0) = phi + (1-phi) * (1-mu)^N,}
+ P(Y=0) = phi + (1- phi) * (1-prob)^N,}
for \eqn{y=0}, and
\deqn{P(Y=y) = (1-\phi) {N \choose Ny} \mu^{Ny} (1-\mu)^{N(1-y)}.}{%
- P(Y=y) = (1-phi) * choose(N,Ny) * mu^(N*y) * (1-mu)^(N*(1-y)).}
+ P(Y=y) = (1-phi) * choose(N,Ny) * prob^(N*y) * (1-prob)^(N*(1-y)).}
for \eqn{y=1/N,2/N,\ldots,1}. That is, the response is a sample
proportion out of \eqn{N} trials, and the argument \code{size} in
\code{\link{rzibinom}} is \eqn{N} here.
- The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 <
- phi < 1}. The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) \mu}{E(Y)
- = (1-phi) * mu} and these are returned as the fitted values.
- By default, the two linear/additive predictors are \eqn{(logit(\phi),
- logit(\mu))^T}{(logit(phi), logit(mu))^T}.
+ The parameter \eqn{\phi}{phi} is the probability of a structural zero,
+ and it satisfies \eqn{0 < \phi < 1}{0 < phi < 1}.
+ The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) \mu}{E(Y) = (1-phi) * prob}
+ and these are returned as the fitted values.
+ By default, the two linear/additive predictors
+ are \eqn{(logit(\phi), logit(\mu))^T}{(logit(phi), logit(prob))^T}.
}
@@ -88,19 +91,23 @@ zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
specifying the values of \eqn{N}.
- To work well, one needs \eqn{N>1} and \eqn{\mu>0}{mu>0}, i.e.,
- the larger \eqn{N} and \eqn{\mu}{mu} are, the better.
+ To work well, one needs \eqn{N>1} and \eqn{\mu>0}{prob>0}, i.e.,
+ the larger \eqn{N} and \eqn{\mu}{prob} are, the better.
For intercept-models and constant \eqn{N} over the \eqn{n} observations,
- the \code{misc} slot has a component called \code{p0} which is the
- estimate of \eqn{P(Y=0)}. This family function currently cannot handle
- a multivariate response (only \code{mv = FALSE} can be handled).
+ the \code{misc} slot has a component called \code{pobs0} which is the
+ estimate of the probability of an observed 0, i.e., \eqn{P(Y=0)}.
+ This family function currently cannot handle a multivariate
+ response (only \code{mv = FALSE} can be handled).
-% The zero-\emph{deflated} binomial distribution cannot be handled with
-% this family function. It can be handled with the zero-altered binomial
-% distribution; see \code{\link{zabinomial}}.
+ The zero-\emph{deflated} binomial distribution might
+ be fitted by setting \code{lpstr0 = identity}, albeit,
+ not entirely reliably. See \code{\link{zipoisson}}
+ for information that can be applied here. Else
+ try the zero-altered binomial distribution (see
+ \code{\link{zabinomial}}).
}
@@ -108,7 +115,7 @@ zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
\section{Warning }{
Numerical problems can occur.
Half-stepping is not uncommon.
- If failure to converge occurs, make use of the argument \code{iphi}.
+ If failure to converge occurs, make use of the argument \code{ipstr0}.
}
@@ -122,17 +129,17 @@ zibinomial(lphi = "logit", lmu = "logit", ephi = list(), emu = list(),
\examples{
size = 10 # Number of trials; N in the notation above
nn = 200
-zibdata = data.frame(phi = logit( 0, inverse = TRUE), # 0.50
+zibdata = data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
mubin = logit(-1, inverse = TRUE), # Mean of usual binomial
sv = rep(size, length = nn))
zibdata = transform(zibdata,
- y = rzibinom(nn, size = sv, prob = mubin, phi = phi))
+ y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zibdata, table(y))
fit = vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
-fit at misc$p0 # Estimate of P(Y = 0)
+fit at misc$pobs0 # Estimate of P(Y = 0)
head(fitted(fit))
with(zibdata, mean(y)) # Compare this with fitted(fit)
summary(fit)
diff --git a/man/zigeomUC.Rd b/man/zigeomUC.Rd
index c63d3fa..4913b19 100644
--- a/man/zigeomUC.Rd
+++ b/man/zigeomUC.Rd
@@ -8,14 +8,14 @@
\title{ Zero-Inflated Geometric Distribution }
\description{
Density, and random generation
- for the zero-inflated geometric distribution with parameter \code{pszero}.
+ for the zero-inflated geometric distribution with parameter \code{pstr0}.
}
\usage{
-dzigeom(x, prob, pszero = 0, log = FALSE)
-pzigeom(q, prob, pszero = 0)
-qzigeom(p, prob, pszero = 0)
-rzigeom(n, prob, pszero = 0)
+dzigeom(x, prob, pstr0 = 0, log = FALSE)
+pzigeom(q, prob, pstr0 = 0)
+qzigeom(p, prob, pstr0 = 0)
+rzigeom(n, prob, pstr0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,7 +23,7 @@ rzigeom(n, prob, pszero = 0)
\item{p}{vector of probabilities.}
\item{prob}{see \code{\link[stats]{dgeom}}.}
\item{n}{number of observations. }
- \item{pszero}{
+ \item{pstr0}{
Probability of structural zero (ignoring the geometric distribution),
called \eqn{\phi}{phi}. The default value corresponds
to the response having an ordinary geometric distribution.
@@ -52,9 +52,16 @@ rzigeom(n, prob, pszero = 0)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{pszero} is recycled to the required length, and
+ The argument \code{pstr0} is recycled to the required length, and
must have values which lie in the interval \eqn{[0,1]}.
+ These functions actually allow for \emph{zero-deflation}.
+ That is, the resulting probability of a zero count
+ is \emph{less than} the nominal value of the parent
+ distribution.
+ See \code{\link{Zipois}} for more information.
+
+
}
\seealso{
@@ -64,16 +71,16 @@ rzigeom(n, prob, pszero = 0)
}
\examples{
-prob = 0.5; pszero = 0.2; x = (-1):20
-(ii = dzigeom(x, prob, pszero))
-max(abs(cumsum(ii) - pzigeom(x, prob, pszero))) # Should be 0
-table(rzigeom(1000, prob, pszero))
+prob = 0.5; pstr0 = 0.2; x = (-1):20
+(ii = dzigeom(x, prob, pstr0))
+max(abs(cumsum(ii) - pzigeom(x, prob, pstr0))) # Should be 0
+table(rzigeom(1000, prob, pstr0))
\dontrun{ x = 0:10
-barplot(rbind(dzigeom(x, prob, pszero), dgeom(x, prob)),
+barplot(rbind(dzigeom(x, prob, pstr0), dgeom(x, prob)),
beside = TRUE, col = c("blue","orange"),
ylab = "P[Y = y]", xlab = "y", las = 1,
- main = paste("zigeometric(", prob, ", pszero = ", pszero,
+ main = paste("zigeometric(", prob, ", pstr0 = ", pstr0,
") (blue) vs",
" geometric(", prob, ") (orange)", sep = ""),
names.arg = as.character(x)) }
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 84e9265..09db7cc 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -9,30 +9,32 @@
}
\usage{
zigeometric(lprob = "logit", eprob = list(),
- lpszero = "logit", epszero = list(),
- iprob = NULL, ipszero = NULL,
+ lpstr0 = "logit", epstr0 = list(),
+ iprob = NULL, ipstr0 = NULL,
imethod = 1, bias.red = 0.5, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lprob, lpszero}{
- Link functions for the parameters \code{prob} and \eqn{\phi}{phi}.
+ \item{lprob, lpstr0}{
+ Link functions for the parameters \eqn{p}{prob} (\code{prob})
+ and \eqn{\phi}{phi}.
The usual geometric probability parameter is the former.
The probability of a structural zero is the latter.
See \code{\link{Links}} for more choices.
+ For the zero-\emph{deflated} model see below.
}
- \item{eprob, epszero}{
+ \item{eprob, epstr0}{
List. Extra argument for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
}
\item{bias.red}{
- A constant used in the initialization process of \code{pszero}.
+ A constant used in the initialization process of \code{pstr0}.
It should lie between 0 and 1, with 1 having no effect.
}
- \item{iprob, ipszero}{
+ \item{iprob, ipstr0}{
See \code{\link{CommonVGAMffArguments}} for information.
}
@@ -43,16 +45,16 @@ zigeometric(lprob = "logit", eprob = list(),
}
\details{
This function uses Fisher scoring and is based on
- \deqn{P(Y=0) = \phi + (1-\phi) prob,}{%
+ \deqn{P(Y=0) = \phi + (1-\phi) p,}{%
P(Y=0) = phi + (1-phi) * prob,}
for \eqn{y=0}, and
- \deqn{P(Y=y) = (1-\phi) prob (1 - prob)^{y}.}{%
+ \deqn{P(Y=y) = (1-\phi) p (1 - p)^{y}.}{%
P(Y=y) = (1-phi) * prob * (1 - prob)^y.}
for \eqn{y=1,2,\ldots}.
The parameter \eqn{\phi}{phi} satisfies \eqn{0 < \phi < 1}{0 <
- phi < 1}. The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) prob / (1-prob)}{E(Y)
+ phi < 1}. The mean of \eqn{Y} is \eqn{E(Y)=(1-\phi) p / (1-p)}{E(Y)
= (1-phi) * prob / (1-prob)} and these are returned as the fitted values.
- By default, the two linear/additive predictors are \eqn{(logit(prob),
+ By default, the two linear/additive predictors are \eqn{(logit(p),
logit(\phi))^T}{(logit(prob), logit(phi))^T}.
@@ -70,21 +72,30 @@ zigeometric(lprob = "logit", eprob = list(),
\author{ T. W. Yee }
\note{
- Numerical problems may occur since the initial values are currently
- not very good.
+% Numerical problems may occur since the initial values are currently
+% not very good.
-}
-\section{Warning }{
- Numerical problems can occur.
- Half-stepping is not uncommon.
- If failure to converge occurs, make use of the argument \code{ipszero}.
+ The zero-\emph{deflated} geometric distribution might
+ be fitted by setting \code{lpstr0 = identity}, albeit,
+ not entirely reliably. See \code{\link{zipoisson}}
+ for information that can be applied here. Else
+ try the zero-altered geometric distribution (see
+ \code{\link{zageometric}}).
+
+}
-}
+%\section{Warning }{
+% Numerical problems can occur.
+% Half-stepping is not uncommon.
+% If failure to converge occurs, make use of the argument \code{ipstr0}.
+%
+%}
\seealso{
\code{\link{rzigeom}},
\code{\link{geometric}},
+ \code{\link{zageometric}},
\code{\link[stats]{rgeom}}.
}
@@ -98,9 +109,9 @@ gdata = transform(gdata, eta1 = 1.0 - 1.0 * x2 + 2.0 * x3,
gdata = transform(gdata, prob1 = logit(eta1, inverse = TRUE),
prob2 = logit(eta2, inverse = TRUE),
prob3 = logit(eta3, inverse = TRUE))
-gdata = transform(gdata, y1 = rzigeom(nn, prob1, pszero = prob3),
- y2 = rzigeom(nn, prob2, pszero = prob3),
- y3 = rzigeom(nn, prob2, pszero = prob3))
+gdata = transform(gdata, y1 = rzigeom(nn, prob1, pstr0 = prob3),
+ y2 = rzigeom(nn, prob2, pstr0 = prob3),
+ y3 = rzigeom(nn, prob2, pstr0 = prob3))
with(gdata, table(y1))
with(gdata, table(y2))
with(gdata, table(y3))
diff --git a/man/zinegbinUC.Rd b/man/zinegbinUC.Rd
index 7874a8b..5f43bbd 100644
--- a/man/zinegbinUC.Rd
+++ b/man/zinegbinUC.Rd
@@ -8,14 +8,15 @@
\title{ Zero-Inflated Negative Binomial Distribution }
\description{
Density, distribution function, quantile function and random generation
- for the zero-inflated negative binomial distribution with parameter \code{phi}.
+ for the zero-inflated negative binomial distribution with
+ parameter \code{pstr0}.
}
\usage{
-dzinegbin(x, phi, size, prob = NULL, munb = NULL, log = FALSE)
-pzinegbin(q, phi, size, prob = NULL, munb = NULL)
-qzinegbin(p, phi, size, prob = NULL, munb = NULL)
-rzinegbin(n, phi, size, prob = NULL, munb = NULL)
+dzinegbin(x, size, prob = NULL, munb = NULL, pstr0 = 0, log = FALSE)
+pzinegbin(q, size, prob = NULL, munb = NULL, pstr0 = 0)
+qzinegbin(p, size, prob = NULL, munb = NULL, pstr0 = 0)
+rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -30,9 +31,10 @@ rzinegbin(n, phi, size, prob = NULL, munb = NULL)
\emph{component}.
}
- \item{phi}{
- Probability of zero (ignoring the negative binomial distribution), called
- \eqn{\phi}{phi}.
+ \item{pstr0}{
+ Probability of structural zero
+ (i.e., ignoring the negative binomial distribution),
+ called \eqn{\phi}{phi}.
}
}
@@ -44,9 +46,10 @@ rzinegbin(n, phi, size, prob = NULL, munb = NULL)
P(Y=0) = phi + (1-phi) * P(W=0)}
where \eqn{W} is distributed as a negative binomial distribution
(see \code{\link[stats:NegBinomial]{rnbinom}}.)
- See \code{\link{negbinomial}}, a \pkg{VGAM} family function,
- for the formula of the probability density function and other details
- of the negative binomial distribution.
+ See \code{\link{negbinomial}}, a \pkg{VGAM} family
+ function, for the formula of the probability density
+ function and other details of the negative binomial
+ distribution.
}
@@ -61,8 +64,17 @@ rzinegbin(n, phi, size, prob = NULL, munb = NULL)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{phi} is recycled to the required length, and
- must have values which lie in the interval \eqn{[0,1]}.
+ The argument \code{pstr0} is recycled to the required
+ length, and must have values which lie in the interval
+ \eqn{[0,1]}.
+
+
+ These functions actually allow for \emph{zero-deflation}.
+ That is, the resulting probability of a zero count
+ is \emph{less than} the nominal value of the parent
+ distribution.
+ See \code{\link{Zipois}} for more information.
+
}
@@ -74,20 +86,19 @@ rzinegbin(n, phi, size, prob = NULL, munb = NULL)
}
\examples{
-munb = 3; phi = 0.2; size = k = 10
-x = 0:10
-(ii = dzinegbin(x, phi = phi, mu = munb, size = k))
-max(abs(cumsum(ii) - pzinegbin(x, phi = phi, mu = munb, size = k))) # Should be 0
-table(rzinegbin(100, phi = phi, mu = munb, size = k))
-
-table(qzinegbin(runif(1000), phi = phi, mu = munb, size = k))
-round(dzinegbin(x, phi = phi, mu = munb, size = k) * 1000) # Should be similar
-
-\dontrun{barplot(rbind(dzinegbin(x, phi = phi, mu = munb, size = k),
- dnbinom(x, mu = munb, size = k)),
- beside = TRUE, col = c("blue", "green"),
- main = paste("ZINB(mu = ", munb, ", k = ", k, ", phi = ", phi,
- ") (blue) vs negative binomial(mu = ", munb,
+munb = 3; pstr0 = 0.2; size = k = 10; x = 0:10
+(ii = dzinegbin(x, pstr0 = pstr0, mu = munb, size = k))
+max(abs(cumsum(ii) - pzinegbin(x, pstr0 = pstr0, mu = munb, size = k))) # 0
+table(rzinegbin(100, pstr0 = pstr0, mu = munb, size = k))
+
+table(qzinegbin(runif(1000), pstr0 = pstr0, mu = munb, size = k))
+round(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k) * 1000) # Should be similar
+
+\dontrun{barplot(rbind(dzinegbin(x, pstr0 = pstr0, mu = munb, size = k),
+ dnbinom(x, mu = munb, size = k)), las = 1,
+ beside = TRUE, col = c("blue", "green"), ylab = "Probability",
+ main = paste("ZINB(mu = ", munb, ", k = ", k, ", pstr0 = ", pstr0,
+ ") (blue) vs NB(mu = ", munb,
", size = ", k, ") (green)", sep = ""),
names.arg = as.character(x)) }
}
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index f0e65e0..4462e8f 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -8,26 +8,29 @@
}
\usage{
-zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
- emunb = list(), esize = list(), iphi = NULL, isize = NULL, zero = -3,
- imethod = 1, shrinkage.init = 0.95, nsimEIM = 200)
+zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
+ epstr0 = list(), emunb = list(), esize = list(),
+ ipstr0 = NULL, isize = NULL, zero = c(-1, -3),
+ imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lphi, lmunb, lsize}{
- Link functions for the parameters \eqn{\phi}{phi},
+ \item{lpstr0, lmunb, lsize}{
+ Link functions for the parameters \eqn{\phi}{pstr0},
the mean and \eqn{k}; see \code{\link{negbinomial}} for details,
and \code{\link{Links}} for more choices.
+ For the zero-\emph{deflated} model see below.
+
}
- \item{ephi, emunb, esize}{
+ \item{epstr0, emunb, esize}{
List. Extra arguments for the respective links.
See \code{earg} in \code{\link{Links}} for general information.
}
- \item{iphi, isize}{
- Optional initial values for \eqn{\phi}{phi} and \eqn{k}{k}.
+ \item{ipstr0, isize}{
+ Optional initial values for \eqn{\phi}{pstr0} and \eqn{k}{k}.
The default is to compute an initial value internally for both.
If a vector then recycling is used.
@@ -42,8 +45,9 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
\item{zero}{
Integers specifying which linear/additive predictor is modelled
as intercepts only. If given, their absolute values must be
- either 1 or 2 or 3, and the default is only the \eqn{k} parameters
- (one for each response).
+ either 1 or 2 or 3.
+ The default is the \eqn{\phi}{pstr0} and \eqn{k} parameters
+ (both for each response).
See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -70,7 +74,7 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
Independent multivariate responses are handled.
- If so then arguments \code{iphi} and \code{isize} may be vectors
+ If so then arguments \code{ipstr0} and \code{isize} may be vectors
with length equal to the number of responses.
@@ -88,8 +92,8 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
\author{ T. W. Yee }
\note{
For intercept-models, the \code{misc} slot has a component called
- \code{p0} which is the estimate of \eqn{P(Y=0)}. Note that \eqn{P(Y=0)}
- is not the parameter \eqn{\phi}{phi}.
+ \code{pobs0} which is the estimate of \eqn{P(Y=0)}.
+ Note that \eqn{P(Y=0)} is not the parameter \eqn{\phi}{phi}.
If \eqn{k} is large then the use of \pkg{VGAM} family function
@@ -98,10 +102,13 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
negative binomial as \eqn{k} tends to infinity.
- The zero-\emph{deflated} negative binomial distribution cannot
- be handled with this family function. It can be handled
- with the zero-altered negative binomial distribution; see
- \code{\link{zanegbinomial}}.
+ The zero-\emph{deflated} negative binomial distribution
+ might be fitted by setting \code{lpstr0 = identity},
+ albeit, not entirely reliably. See \code{\link{zipoisson}}
+ for information that can be applied here. Else try
+ the zero-altered negative binomial distribution (see
+ \code{\link{zanegbinomial}}).
+
}
@@ -114,7 +121,7 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
If failure to converge occurs, try using combinations of
\code{imethod},
\code{shrinkage.init},
- \code{iphi},
+ \code{ipstr0},
\code{isize}, and/or
\code{zero} if there are explanatory variables.
@@ -129,30 +136,32 @@ zinegbinomial(lphi = "logit", lmunb = "loge", lsize = "loge", ephi = list(),
}
\examples{
# Example 1
-nbdat <- data.frame(x2 = runif(nn <- 1000))
-nbdat <- transform(nbdat, phi = logit(-0.5 + 1 * x2, inverse = TRUE),
- munb = exp(3 + x2),
- size = exp(0 + 2*x2))
-nbdat <- transform(nbdat, y1 = rzinegbin(nn, phi, mu = munb, size = size),
- y2 = rzinegbin(nn, phi, mu = munb, size = size))
-with(nbdat, table(y1)["0"] / sum(table(y1)))
-fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), nbdat)
+ndata <- data.frame(x2 = runif(nn <- 1000))
+ndata <- transform(ndata, pstr0 = logit(-0.5 + 1 * x2, inverse = TRUE),
+ munb = exp( 3 + 1 * x2),
+ size = exp( 0 + 2 * x2))
+ndata <- transform(ndata,
+ y1 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0),
+ y2 = rzinegbin(nn, mu = munb, size = size, pstr0 = pstr0))
+with(ndata, table(y1)["0"] / sum(table(y1)))
+fit <- vglm(cbind(y1, y2) ~ x2, zinegbinomial(zero = NULL), ndata)
coef(fit, matrix = TRUE)
summary(fit)
-head(cbind(fitted(fit), with(nbdat, (1-phi) * munb)))
+head(cbind(fitted(fit), with(ndata, (1 - pstr0) * munb)))
round(vcov(fit), 3)
# Example 2: RR-ZINB could also be called a COZIVGLM-ZINB-2
-nbdat <- data.frame(x2 = runif(nn <- 2000))
-nbdat <- transform(nbdat, x3 = runif(nn))
-nbdat <- transform(nbdat, eta1 = 3 + 1 * x2 + 2 * x3)
-nbdat <- transform(nbdat, phi = logit(-1.5 + 0.5 * eta1, inverse = TRUE),
+ndata <- data.frame(x2 = runif(nn <- 2000))
+ndata <- transform(ndata, x3 = runif(nn))
+ndata <- transform(ndata, eta1 = 3 + 1 * x2 + 2 * x3)
+ndata <- transform(ndata, pstr0 = logit(-1.5 + 0.5 * eta1, inverse = TRUE),
munb = exp(eta1),
size = exp(4))
-nbdat <- transform(nbdat, y1 = rzinegbin(nn, phi, mu = munb, size = size))
-with(nbdat, table(y1)["0"] / sum(table(y1)))
-rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), nbdat,
+ndata <- transform(ndata,
+ y1 = rzinegbin(nn, pstr0 = pstr0, mu = munb, size = size))
+with(ndata, table(y1)["0"] / sum(table(y1)))
+rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), ndata,
Index.corner = 2, szero = 3, trace = TRUE)
coef(rrzinb, matrix = TRUE)
Coef(rrzinb)
diff --git a/man/zipf.Rd b/man/zipf.Rd
index 59ae7ad..8d4f4b3 100644
--- a/man/zipf.Rd
+++ b/man/zipf.Rd
@@ -15,8 +15,8 @@ zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
Number of elements, an integer satisfying \code{1 < N < Inf}.
The default is to use the maximum value of the response.
If given, \code{N} must be no less that the largest response value.
- If \code{N = Inf} and \eqn{s>1} then this is the zeta distribution
- (use \code{\link{zetaff}} instead).
+ If \code{N = Inf} and \eqn{s>1} then this is the zeta
+ distribution (use \code{\link{zetaff}} instead).
}
\item{link}{
@@ -38,43 +38,49 @@ zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
}
\details{
The probability function for a response \eqn{Y} is
- \deqn{P(Y=y) = y^{-s} / \sum_{i=1}^N i^{-s},\ \ \ s>0,\ \ \ y=1,2,\ldots,N,}{%
+ \deqn{P(Y=y) = y^{-s} / \sum_{i=1}^N i^{-s},\ \ s>0,\ \ y=1,2,\ldots,N,}{%
P(Y=y) = (y^(-s)) / sum((1:N)^(-s)), s>0, y=1,2,...,N,}
where \eqn{s} is the exponent characterizing the distribution.
The mean of \eqn{Y}, which are returned as the fitted values,
is \eqn{\mu = H_{N,s-1} / H_{N,s}}{H(N,s-1) / H(N,s)}
- where \eqn{H_{n,m}= \sum_{i=1}^n i^{-m}}{H(n,m)=sum((1:n)^(-m))} is the
- \eqn{n}th generalized harmonic number.
-
- Zipf's law is an experimental law which is often applied to the study
- of the frequency of words in a corpus of natural language utterances.
- It states that the frequency of any word is inversely proportional to
- its rank in the frequency table.
- For example, "the" and "of" are first two most common words, and
- Zipf's law states that "the" is twice as common as "of".
+ where \eqn{H_{n,m}= \sum_{i=1}^n i^{-m}}{H(n,m)=sum((1:n)^(-m))}
+ is the \eqn{n}th generalized harmonic number.
+
+
+ Zipf's law is an experimental law which is often applied
+ to the study of the frequency of words in a corpus of
+ natural language utterances. It states that the frequency
+ of any word is inversely proportional to its rank in the
+ frequency table. For example, \code{"the"} and \code{"of"}
+ are first two most common words, and Zipf's law states
+ that \code{"the"} is twice as common as \code{"of"}.
Many other natural phenomena conform to Zipf's law.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
- The object is used by modelling functions such as \code{\link{vglm}}
- and \code{\link{vgam}}.
+ The object is used by modelling functions such as
+ \code{\link{vglm}} and \code{\link{vgam}}.
+
}
\references{
-pp.526-- of Chapter 11 of
- Johnson N. L., Kemp, A. W. and Kotz S. (2005)
- \emph{Univariate Discrete Distributions},
- 3rd edition,
- Hoboken, New Jersey: Wiley.
+
+ pp.526-- of Chapter 11 of
+ Johnson N. L., Kemp, A. W. and Kotz S. (2005)
+ \emph{Univariate Discrete Distributions},
+ 3rd edition,
+ Hoboken, New Jersey: Wiley.
}
\author{ T. W. Yee }
\note{
Upon convergence, the \code{N} is stored as \code{@misc$N}.
+
}
\seealso{
@@ -83,13 +89,13 @@ pp.526-- of Chapter 11 of
}
\examples{
-zdata = data.frame(y = 1:5, w = c(63, 14, 5, 1, 2))
-fit = vglm(y ~ 1, zipf, zdata, trace = TRUE, weight = w, crit = "c")
+zdata = data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
+fit = vglm(y ~ 1, zipf, zdata, trace = TRUE, weight = ofreq, crit = "coef")
fit = vglm(y ~ 1, zipf(link = identity, init = 3.4), zdata,
- trace = TRUE, weight = w)
+ trace = TRUE, weight = ofreq)
fit at misc$N
(shat = Coef(fit))
-with(zdata, weighted.mean(y, w))
+with(zdata, weighted.mean(y, ofreq))
fitted(fit, matrix = FALSE)
}
\keyword{models}
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
index b46e038..f4a7f1c 100644
--- a/man/zipoisUC.Rd
+++ b/man/zipoisUC.Rd
@@ -8,14 +8,14 @@
\title{ Zero-Inflated Poisson Distribution }
\description{
Density, distribution function, quantile function and random generation
- for the zero-inflated Poisson distribution with parameter \code{phi}.
+ for the zero-inflated Poisson distribution with parameter \code{pstr0}.
}
\usage{
-dzipois(x, lambda, phi = 0, log = FALSE)
-pzipois(q, lambda, phi = 0)
-qzipois(p, lambda, phi = 0)
-rzipois(n, lambda, phi = 0)
+dzipois(x, lambda, pstr0 = 0, log = FALSE)
+pzipois(q, lambda, pstr0 = 0)
+qzipois(p, lambda, pstr0 = 0)
+rzipois(n, lambda, pstr0 = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,9 +23,11 @@ rzipois(n, lambda, phi = 0)
\item{p}{vector of probabilities. }
\item{n}{number of observations. Must be a single positive integer. }
\item{lambda}{ Vector of positive means. }
- \item{phi}{
- Probability of zero (ignoring the Poisson distribution), called
- \eqn{\phi}{phi}. The default value of \eqn{\phi = 0}{phi = 0} corresponds
+ \item{pstr0}{
+ Probability of a structural zero
+ (i.e., ignoring the Poisson distribution),
+ called \eqn{\phi}{phi}.
+ The default value of \eqn{\phi = 0}{phi = 0} corresponds
to the response having an ordinary Poisson distribution.
}
@@ -35,7 +37,7 @@ rzipois(n, lambda, phi = 0)
The probability function of \eqn{Y} is 0 with probability
\eqn{\phi}{phi}, and \eqn{Poisson(\lambda)}{Poisson(lambda)} with
probability \eqn{1-\phi}{1-phi}. Thus
- \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
+ \deqn{P(Y=0) =\phi + (1-\phi) P(W=0)}{%
P(Y=0) = phi + (1-phi) * P(W=0)}
where \eqn{W} is distributed \eqn{Poisson(\lambda)}{Poisson(lambda)}.
@@ -52,8 +54,20 @@ rzipois(n, lambda, phi = 0)
%\references{ }
\author{ Thomas W. Yee }
\note{
- The argument \code{phi} is recycled to the required length, and
- must have values which lie in the interval \eqn{[0,1]}.
+ The argument \code{pstr0} is recycled to the required length, and
+ must have values which lie in the interval \eqn{[0,1]}.
+
+
+ These functions actually allow for the \emph{zero-deflated
+ Poisson} distribution. Here, \code{pstr0} is also permitted
+ to lie in the interval \code{[-1/expm1(lambda), 0]}. The
+ resulting probability of a zero count is \emph{less than}
+ the nominal Poisson value, and the use of \code{pstr0} to
+ stand for the probability of a structural zero loses its
+ meaning. When \code{pstr0} equals \code{-1/expm1(lambda)}
+ this corresponds to the positive-Poisson distribution
+ (e.g., see \code{\link{dpospois}}).
+
}
@@ -65,18 +79,28 @@ rzipois(n, lambda, phi = 0)
}
\examples{
-lambda = 3; phi = 0.2; x = (-1):7
-(ii = dzipois(x, lambda, phi))
-max(abs(cumsum(ii) - pzipois(x, lambda, phi))) # Should be 0
-table(rzipois(100, lambda, phi))
+lambda = 3; pstr0 = 0.2; x = (-1):7
+(ii = dzipois(x, lambda, pstr0 = pstr0))
+max(abs(cumsum(ii) - pzipois(x, lambda, pstr0 = pstr0))) # Should be 0
+table(rzipois(100, lambda, pstr0 = pstr0))
-table(qzipois(runif(100), lambda, phi))
-round(dzipois(0:10, lambda, phi) * 100) # Should be similar
+table(qzipois(runif(100), lambda, pstr0))
+round(dzipois(0:10, lambda, pstr0 = pstr0) * 100) # Should be similar
\dontrun{ x = 0:10
-barplot(rbind(dzipois(x, lambda, phi), dpois(x, lambda)),
+par(mfrow = c(2, 1)) # Zero-inflated Poisson
+barplot(rbind(dzipois(x, lambda, pstr0 = pstr0), dpois(x, lambda)),
+ beside = TRUE, col = c("blue","orange"),
+ main = paste("ZIP(", lambda, ", pstr0 = ", pstr0, ") (blue) vs",
+ " Poisson(", lambda, ") (orange)", sep = ""),
+ names.arg = as.character(x))
+
+deflat_limit = -1 / expm1(lambda) # Zero-deflated Poisson
+newpstr0 = round(deflat_limit / 1.5, 3)
+barplot(rbind(dzipois(x, lambda, pstr0 = newpstr0),
+ dpois(x, lambda)),
beside = TRUE, col = c("blue","orange"),
- main = paste("ZIP(", lambda, ", phi = ", phi, ") (blue) vs",
+ main = paste("ZDP(", lambda, ", pstr0 = ", newpstr0, ") (blue) vs",
" Poisson(", lambda, ") (orange)", sep = ""),
names.arg = as.character(x)) }
}
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 9ad2b0a..2a24753 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -13,23 +13,25 @@ zipoissonff(llambda = "loge", lprobp = "logit",
elambda = list(), eprobp = list(),
ilambda = NULL, iprobp = NULL, imethod = 1,
shrinkage.init = 0.8, zero = -2)
-zipoisson(lphi = "logit", llambda = "loge",
- ephi = list(), elambda = list(),
- iphi = NULL, ilambda = NULL, imethod = 1,
+zipoisson(lpstr0 = "logit", llambda = "loge",
+ epstr0 = list(), elambda = list(),
+ ipstr0 = NULL, ilambda = NULL, imethod = 1,
shrinkage.init = 0.8, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lphi, llambda, ephi, elambda}{
+ \item{lpstr0, llambda, epstr0, elambda}{
Link function and extra argument for the parameter \eqn{\phi}{phi}
and the usual \eqn{\lambda}{lambda} parameter.
See \code{\link{Links}} for more choices,
and \code{earg} in \code{\link{Links}} for general information.
See \code{\link{CommonVGAMffArguments}} for more information.
+ For the zero-\emph{deflated} model see below.
+
}
- \item{iphi, ilambda}{
+ \item{ipstr0, ilambda}{
Optional initial values for \eqn{\phi}{phi}, whose values must lie
between 0 and 1.
Optional initial values for \eqn{\lambda}{lambda}, whose values must
@@ -49,7 +51,7 @@ zipoisson(lphi = "logit", llambda = "loge",
specifies the initialization method for \eqn{\lambda}{lambda}.
If failure to converge occurs try another value
and/or else specify a value for \code{shrinkage.init}
- and/or else specify a value for \code{iphi}.
+ and/or else specify a value for \code{ipstr0}.
See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -100,7 +102,7 @@ zipoisson(lphi = "logit", llambda = "loge",
(i) the order of the linear/additive predictors is switched so the
Poisson mean comes first;
(ii) \code{probp} is now the probability of the Poisson component,
- i.e., \code{probp} is \code{1-phi};
+ i.e., \code{probp} is \code{1-pstr0};
(iii) it can handle multiple responses;
(iv) argument \code{zero} has a new default so that the \code{probp}
is an intercept-only by default.
@@ -136,7 +138,7 @@ zipoisson(lphi = "logit", llambda = "loge",
Cambridge University Press: Cambridge.
- Yee, T. W. (2010)
+ Yee, T. W. (2012)
Two-parameter reduced-rank vector generalized linear models.
\emph{In preparation}.
@@ -150,9 +152,21 @@ zipoisson(lphi = "logit", llambda = "loge",
function currently cannot handle a multivariate response.
- The zero-\emph{deflated} Poisson distribution cannot be handled with
- this family function. It can be handled with the zero-altered Poisson
- distribution; see \code{\link{zapoisson}}.
+ Although the functions in \code{\link{Zipois}}
+ can handle the zero-\emph{deflated} Poisson distribution,
+ this family function cannot estimate this very well in general.
+ One sets \code{lpstr0 = identity}, however, the iterations
+ might fall outside the parameter space.
+ Practically, it is restricted to intercept-models only
+ (see example below).
+ Also, one might need inputting good initial values
+ or using a simpler model to obtain initial values.
+% If there is a covariate then it is best to
+% constrain \code{pstr0} to be intercept-only, e.g.,
+% by \code{zipoisson(lpstr0 = identity, zero = -1)}.
+ A (somewhat) similar and more reliable method for
+ zero-deflation is to try the zero-altered Poisson model
+ (see \code{\link{zapoisson}}).
The use of this \pkg{VGAM} family function with \code{\link{rrvglm}}
@@ -178,7 +192,7 @@ zipoisson(lphi = "logit", llambda = "loge",
If failure to converge occurs, try using combinations of
\code{imethod},
\code{shrinkage.init},
- \code{iphi}, and/or
+ \code{ipstr0}, and/or
\code{zipoisson(zero = 1)} if there are explanatory variables.
The default for \code{zipoissonff()} is to model the
structural zero probability as an intercept-only.
@@ -198,14 +212,14 @@ zipoisson(lphi = "logit", llambda = "loge",
\examples{
# Example 1: simulated ZIP data
zdata <- data.frame(x2 = runif(nn <- 2000))
-zdata <- transform(zdata, phi1 = logit(-0.5 + 1*x2, inverse = TRUE),
- phi2 = logit( 0.5 - 1*x2, inverse = TRUE),
- Phi1 = logit(-0.5 , inverse = TRUE),
- Phi2 = logit( 0.5 , inverse = TRUE),
- lambda1 = loge( 0.5 + 2*x2, inverse = TRUE),
+zdata <- transform(zdata, pstr01 = logit(-0.5 + 1*x2, inverse = TRUE),
+ pstr02 = logit( 0.5 - 1*x2, inverse = TRUE),
+ Ps01 = logit(-0.5 , inverse = TRUE),
+ Ps02 = logit( 0.5 , inverse = TRUE),
+ lambda1 = loge(-0.5 + 2*x2, inverse = TRUE),
lambda2 = loge( 0.5 + 2*x2, inverse = TRUE))
-zdata <- transform(zdata, y1 = rzipois(nn, lambda1, Phi1),
- y2 = rzipois(nn, lambda2, Phi2))
+zdata <- transform(zdata, y1 = rzipois(nn, lambda = lambda1, pstr0 = Ps01),
+ y2 = rzipois(nn, lambda = lambda2, pstr0 = Ps02))
with(zdata, table(y1)) # Eyeball the data
with(zdata, table(y2))
@@ -218,7 +232,6 @@ coef(fit2, matrix = TRUE) # These should agree with the above values
fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, zdata, crit = "coef")
coef(fit12, matrix = TRUE) # These should agree with the above values
-
# Example 2: McKendrick (1926). Data from 223 Indian village households
cholera <- data.frame(ncases = 0:4, # Number of cholera cases,
wfreq = c(168, 32, 16, 6, 1)) # Frequencies
@@ -226,25 +239,41 @@ fit <- vglm(ncases ~ 1, zipoisson, wei = wfreq, cholera, trace = TRUE)
coef(fit, matrix = TRUE)
with(cholera, cbind(actual = wfreq,
fitted = round(dzipois(ncases, lambda = Coef(fit)[2],
- phi = Coef(fit)[1]) *
+ pstr0 = Coef(fit)[1]) *
sum(wfreq), dig = 2)))
# Example 3: data from Angers and Biswas (2003)
abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
abdata <- subset(abdata, w > 0)
-fit <- vglm(y ~ 1, zipoisson(lphi = probit, iphi = 0.3),
+fit <- vglm(y ~ 1, zipoisson(lpstr0 = probit, ipstr0 = 0.8),
abdata, weight = w, trace = TRUE)
-fit at misc$prob0 # Estimate of P(Y = 0)
+fit at misc$pobs0 # Estimate of P(Y = 0)
coef(fit, matrix = TRUE)
-Coef(fit) # Estimate of phi and lambda
+Coef(fit) # Estimate of pstr0 and lambda
fitted(fit)
with(abdata, weighted.mean(y, w)) # Compare this with fitted(fit)
summary(fit)
-
-# Example 4: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
-rrzip <- rrvglm(Alopacce ~ bs(WaterCon), zipoissonff(zero = NULL),
- hspider, trace = TRUE)
+# Example 4: zero-deflated model for an intercept-only data
+zdata <- transform(zdata, lambda3 = loge( 0.0 , inverse = TRUE))
+zdata <- transform(zdata, deflat_limit = -1 / expm1(lambda3)) # Boundary
+# The 'pstr0' parameter is negative and in parameter space:
+zdata <- transform(zdata, usepstr0 = deflat_limit / 1.5)
+zdata <- transform(zdata, y3 = rzipois(nn, lambda3, pstr0 = usepstr0))
+head(zdata)
+with(zdata, table(y3)) # A lot of deflation
+fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identity),
+ zdata, trace = TRUE, crit = "coef")
+coef(fit3, matrix = TRUE)
+# Check how accurate it was:
+zdata[1, 'usepstr0'] # Answer
+coef(fit3)[1] # Estimate
+Coef(fit3)
+
+# Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
+set.seed(123)
+rrzip <- rrvglm(Alopacce ~ bs(WaterCon, df = 3), zipoisson(zero = NULL),
+ hspider, trace = TRUE, Index.corner = 2)
coef(rrzip, matrix = TRUE)
Coef(rrzip)
summary(rrzip)
@@ -253,7 +282,7 @@ summary(rrzip)
\keyword{models}
\keyword{regression}
-% Yee, T. W. (2010)
+% Yee, T. W. (2012)
% An alternative to quasi-Poisson vs. negative binomial
% regression: the reduced-rank negative binomial model.
% \emph{In preparation}.
diff --git a/src/caqo3.c b/src/caqo3.c
index c41c7d3..1f5ccec 100644
--- a/src/caqo3.c
+++ b/src/caqo3.c
@@ -178,7 +178,6 @@ void yiumjq3nn2howibc2a(double *objzgdk0, double *i9mwnvqt, double *lfu2qhid) {
void yiumjq3nbewf1pzv9(double *objzgdk0, double *lfu2qhid) {
- double pq0hfucn, xd4mybgj;
if (*objzgdk0 <= 2.0e-200) {
*lfu2qhid = -460.0e0;
@@ -218,8 +217,8 @@ void yiumjq3npkc4ejib(double w8znmyce[], double zshtfg8c[], double m0ibglfx[],
- int ayfnwr1v, yq6lorbx, gp1jxzuh, uw3favmo, sedf7mxb;
- double vogkfwt8, *fpdlcqk9zshtfg8c, *fpdlcqk9w8znmyce, *fpdlcqk9f9piukdx,
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, sedf7mxb;
+ double *fpdlcqk9zshtfg8c, *fpdlcqk9w8znmyce, *fpdlcqk9f9piukdx,
*fpdlcqk9m0ibglfx, *fpdlcqk9vm4xjosb;
if (*vtsou9pz == 1) {
@@ -756,7 +755,7 @@ void yiumjq3nflncwkfq71(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int
int i0spbklx, ayfnwr1v, hpmwnav2, // sedf7mxb = *xwdf5ltg + 1,
hyqwtp6i = *xwdf5ltg * (*xwdf5ltg + 1) / 2;
- double tad5vhsu, uqnkc6zg, *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72,
+ double *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72,
*fpdlcqk9w8znmyce, *fpdlcqk9vm4xjosb, *fpdlcqk9kifxa0he;
int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7;
@@ -884,10 +883,12 @@ void yiumjq3nflncwkfq72(double lncwkfq7[], double w8znmyce[], int *ftnjamu2, int
- int i0spbklx, ayfnwr1v, yq6lorbx, gp1jxzuh, hpmwnav2, g3psxjru, sedf7mxb = 0,
+
+ int i0spbklx, ayfnwr1v, yq6lorbx, gp1jxzuh, hpmwnav2, sedf7mxb = 0,
hyqwtp6i = *xwdf5ltg * (*xwdf5ltg + 1) / 2;
- double tad5vhsu, uqnkc6zg, *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72,
- *fpdlcqk9w8znmyce, *fpdlcqk9vm4xjosb, *fpdlcqk9kifxa0he;
+ double uqnkc6zg, *fpdlcqk9lncwkfq7, *fpdlcqk9lncwkfq71, *fpdlcqk9lncwkfq72,
+ *fpdlcqk9w8znmyce, *fpdlcqk9vm4xjosb;
+
int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7;
wkumc9idtgiyxdw1 = Calloc(hyqwtp6i, int);
@@ -1035,6 +1036,14 @@ void yiumjq3nietam6(double tlgduey8[], double m0ibglfx[], double y7sdgtqi[],
double *fpdlcqk9tlgduey8, *fpdlcqk9m0ibglfx, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2,
*fpdlcqk9ufgqj9ck;
+
+ fpdlcqk9m0ibglfx =
+ fpdlcqk9m0ibglfx1 =
+ fpdlcqk9m0ibglfx2 = &tad5vhsu;
+ gyuq8dex = 1.0;
+
+
+
fpdlcqk9tlgduey8 = tlgduey8 + (*hj3ftvzu-1) * *ftnjamu2;
fpdlcqk9ufgqj9ck = ufgqj9ck;
if (*qfx3vhct == 3 || *qfx3vhct == 5) {
@@ -1163,7 +1172,7 @@ void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[],
int *ftnjamu2, int *wy1vqfzu, int *afpc0kns, int *br5ovgcj, int *npjlv3mr,
int *hj3ftvzu, int *qfx3vhct, int *zjkrtol8, int *unhycz0e, double vm4xjosb[]) {
- int ayfnwr1v, lbgwvp3q; //qfx3vhct # kvowz9ht
+ int ayfnwr1v, lbgwvp3q = -7; //qfx3vhct # kvowz9ht
double xd4mybgja, xd4mybgjb, xd4mybgjc, anopu9vi;
double *fpdlcqk9m0ibglfx, *fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9t8hwvalr,
*fpdlcqk9vm4xjosb, *fpdlcqk9wpuarq2m, *fpdlcqk9ufgqj9ck, *fpdlcqk9rbne6ouj,
@@ -1178,6 +1187,25 @@ void yiumjq3ndlgpwe0c(double tlgduey8[], double ufgqj9ck[], double m0ibglfx[],
double tmp1;
+
+
+ fpdlcqk9m0ibglfx =
+ fpdlcqk9m0ibglfx1 =
+ fpdlcqk9m0ibglfx2 = &xd4mybgja;
+ lbgwvp3q += 7;
+ lbgwvp3q *= lbgwvp3q;
+
+
+
+
+
+
+
+
+
+
+
+
n2kersmx = 0.990e0;
n2kersmx = 0.995e0;
@@ -1449,21 +1477,24 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
- int ayfnwr1v, yq6lorbx, gp1jxzuh, uw3favmo, bpvaqm5z, g3psxjru, yu6izdrc = 0,
+
+ int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z, yu6izdrc = 0,
kcm6jfob, fmzq7aob, xwdf5ltg, kvowz9ht, f7svlajr, qfx3vhct, c5aesxkul, pqneb2ra = 1;
- int ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, eu3oxvyb, vtsou9pz, unhycz0e, zaupqv9b, wr0lbopv;
+ int ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, eu3oxvyb, vtsou9pz, unhycz0e, wr0lbopv;
double dn3iasxug, wiptsjx8, bh2vgiay, pvofyg8z = 1.0e-7, uylxqtc7 = 0.0,
uaf2xgqy, vsoihn1r, rsynp1go; // rpto5qwb,
double *qnwamo0e1, *fpdlcqk9w8znmyce,
*fpdlcqk9m0ibglfx, *fpdlcqk9vm4xjosb, *fpdlcqk9vc6hatuj, *fpdlcqk9wpuarq2m, *fpdlcqk9ghz9vuba;
- double hmayv1xt1 = 1.0, hmayv1xt2 = 0.0;
+ double hmayv1xt1 = 10.0, hmayv1xt2 = 0.0;
int x1jrewny = 0;
+
double *wkumc9idrpto5qwb, *wkumc9idtwk;
wkumc9idrpto5qwb = Calloc(1 + *afpc0kns , double);
wkumc9idtwk = Calloc(*wy1vqfzu * *ftnjamu2 * 2, double);
+
xwdf5ltg = xui7hqwl[0];
fmzq7aob = xui7hqwl[1];
xlpjcg3s = xui7hqwl[2];
@@ -1475,7 +1506,6 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
eu3oxvyb = xui7hqwl[10];
vtsou9pz = xui7hqwl[11];
unhycz0e = xui7hqwl[13];
- zaupqv9b = xui7hqwl[14];
wr0lbopv = xui7hqwl[17];
dn3iasxug = y7sdgtqi[0];
uaf2xgqy = sqrt(dn3iasxug);
@@ -1484,6 +1514,18 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
bh2vgiay = y7sdgtqi[1];
rsynp1go = y7sdgtqi[2];
+
+
+
+
+ hmayv1xt1 -= bh2vgiay;
+ hmayv1xt2 -= rsynp1go;
+ hmayv1xt1 += hmayv1xt2;
+
+
+
+
+
*zjkrtol8 = 1;
yiumjq3nflncwkfq72(lncwkfq7, w8znmyce, ftnjamu2, wy1vqfzu,
@@ -1612,8 +1654,10 @@ void cqo_2(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
if (unhycz0e == 1) {
if (qfx3vhct == 3 || qfx3vhct == 5) {
+
if (2 * *afpc0kns != *wy1vqfzu) //Rprintf
Rprintf("Error: 2 * *afpc0kns != *wy1vqfzu in C_cqo_2\n");
+
fpdlcqk9m0ibglfx = m0ibglfx;
fpdlcqk9vm4xjosb = vm4xjosb;
for (ayfnwr1v = 1; ayfnwr1v <= *ftnjamu2; ayfnwr1v++) {
@@ -1702,9 +1746,11 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
- int ayfnwr1v, hj3ftvzu, yu6izdrc = 0, pqneb2ra = 1,
- kcm6jfob, fmzq7aob, unhycz0e, xwdf5ltg, kvowz9ht, f7svlajr, qfx3vhct, c5aesxkul,
- ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, vtsou9pz, zaupqv9b, yru9olks, wr0lbopv;
+
+
+ int ayfnwr1v, hj3ftvzu, yu6izdrc = 0, pqneb2ra = 1, wr0lbopv,
+ kcm6jfob, unhycz0e, xwdf5ltg, kvowz9ht, f7svlajr, qfx3vhct, c5aesxkul,
+ ybnsqgo9, algpft4y, qemj9asg, xlpjcg3s, vtsou9pz, yru9olks;
double dn3iasxug, wiptsjx8, pvofyg8z = 1.0e-7, uylxqtc7 = 0.0,
bh2vgiay, uaf2xgqy, vsoihn1r, rsynp1go, rpto5qwb;
double *fpdlcqk9zshtfg8c, *fpdlcqk9w8znmyce,
@@ -1713,14 +1759,15 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
*fpdlcqk9wpuarq2m, *fpdlcqk9wpuarq2m1, *fpdlcqk9wpuarq2m2,
*fpdlcqk9ghz9vuba1, *fpdlcqk9ghz9vuba2;
- int gp1jxzuh, loopCnt, loopCnt2;
+
+
+ int gp1jxzuh;
double hmayv1xt = 2.0, Totdev = 0.0e0;
double *wkumc9idtwk;
wkumc9idtwk = Calloc(*br5ovgcj * 3 , double);
xwdf5ltg = xui7hqwl[0];
- fmzq7aob = xui7hqwl[1];
xlpjcg3s = xui7hqwl[2];
kvowz9ht = xui7hqwl[3];
f7svlajr = xui7hqwl[4];
@@ -1740,9 +1787,8 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
return;
}
unhycz0e = xui7hqwl[13];
- zaupqv9b = xui7hqwl[14];
yru9olks = xui7hqwl[15];
- wr0lbopv = xui7hqwl[17];
+ wr0lbopv = xui7hqwl[17]; //20120222; correct but unused.
dn3iasxug = y7sdgtqi[0];
uaf2xgqy = sqrt(dn3iasxug);
@@ -1753,6 +1799,13 @@ void cqo_1(double lncwkfq7[], double tlgduey8[],
+ hmayv1xt -= rsynp1go;
+ hmayv1xt += hmayv1xt;
+
+
+
+
+
yiumjq3nflncwkfq71(lncwkfq7, w8znmyce, ftnjamu2, &xwdf5ltg,
&qfx3vhct, vm4xjosb, br5ovgcj, &xlpjcg3s,
@@ -2013,8 +2066,11 @@ void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
+
+
int ayfnwr1v, gp1jxzuh, xvr7bonh, hpmwnav2, idlosrw8, xwdf5ltg = xui7hqwl[ 0],
- vtsou9pz, wr0lbopv, exrkcn5d = xui7hqwl[12];
+ vtsou9pz;
+ int exrkcn5d = xui7hqwl[12];
double fxnhilr3, *fpdlcqk9k7hulceq,
*fpdlcqk9kpzavbj3mat, *fpdlcqk9lncwkfq7, *fpdlcqk9yxiwebc5, *fpdlcqk9atujnxb8;
@@ -2025,9 +2081,9 @@ void dcqo1(double lncwkfq7[], double tlgduey8[], double kifxa0he[],
fpdlcqk9kpzavbj3mat = kpzavbj3mat;
+
idlosrw8 = xui7hqwl[ 4];
vtsou9pz = xui7hqwl[11];
- wr0lbopv = xui7hqwl[17];
fpdlcqk9lncwkfq7 = lncwkfq7;
fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5;
@@ -2145,25 +2201,27 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
+
int hj3ftvzu, ehtjigf4, kvowz9ht, yu6izdrc = 0, pqneb2ra = 1, xwdf5ltg = xui7hqwl[0],
- f7svlajr, qfx3vhct, c5aesxkul, wr0lbopv, vtsou9pz, zaupqv9b, xlpjcg3s,
+ f7svlajr, qfx3vhct, c5aesxkul, wr0lbopv, vtsou9pz, xlpjcg3s,
sedf7mxb, kcm6jfob, lensmo = (xwdf5ltg == 1 ? 2 : 4) * *afpc0kns;
double rpto5qwb, dn3iasxug, wiptsjx8, bh2vgiay, uaf2xgqy, vsoihn1r,
- rsynp1go, fjcasv7g[6], zpcqv3uj, ghdetj8v = 0.0;
+ rsynp1go, fjcasv7g[6], ghdetj8v = 0.0;
double *fpdlcqk9kispwgx3;
+
double hmayv1xt = 0.0, Totdev = 0.0e0;
+
int qes4mujl, ayfnwr1v, kij0gwer, xumj5dnk, lyma1kwc; // = xui7hqwl[10];
double hmayv1xtvm4xjosb[2];
- double *fpdlcqk9ui8ysltq, *fpdlcqk9lxyst1eb, *fpdlcqk9zyodca3j,
+ double *fpdlcqk9lxyst1eb, *fpdlcqk9zyodca3j,
*fpdlcqk9m0ibglfx1, *fpdlcqk9m0ibglfx2, *fpdlcqk9wpuarq2m1, *fpdlcqk9wpuarq2m2;
double *wkumc9idui8ysltq, *wkumc9idlxyst1eb, *wkumc9idzyodca3j;
- double *fpdlcqk9hdnw2fts, *fpdlcqk9wbkq9zyi;
double *wkumc9idhdnw2fts, *wkumc9idwbkq9zyi;
@@ -2203,15 +2261,21 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
Free(wkumc9idhdnw2fts); Free(wkumc9idwbkq9zyi);
return;
}
- zaupqv9b = xui7hqwl[14];
wr0lbopv = xui7hqwl[17];
- zpcqv3uj = y7sdgtqi[3 + *afpc0kns + *afpc0kns + 1];
dn3iasxug = y7sdgtqi[0];
uaf2xgqy = sqrt(dn3iasxug);
vsoihn1r = log(dn3iasxug);
bh2vgiay = y7sdgtqi[1];
rsynp1go = y7sdgtqi[2];
+
+
+ hmayv1xt += hmayv1xt;
+ hmayv1xt *= hmayv1xt;
+
+
+
+
*zjkrtol8 = 1;
for (hj3ftvzu = 1; hj3ftvzu <= *afpc0kns; hj3ftvzu++) {
@@ -2380,6 +2444,10 @@ void vcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
+
+
+
+
void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
double m0ibglfx[], double t8hwvalr[], double ghz9vuba[],
double rbne6ouj[], double wpuarq2m[],
@@ -2406,19 +2474,20 @@ void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
- int ayfnwr1v, xvr7bonh, hpmwnav2, idlosrw8, exrkcn5d, xwdf5ltg = xui7hqwl[ 0],
- vtsou9pz, wr0lbopv;
+
+ int ayfnwr1v, xvr7bonh, hpmwnav2, idlosrw8, xwdf5ltg = xui7hqwl[ 0],
+ vtsou9pz;
double fxnhilr3;
+
double ghdetj8v = 0.0e0, ydcnh9xl = y7sdgtqi[3 + *afpc0kns + *afpc0kns + 3 -1];
double *fpdlcqk9k7hulceq, *fpdlcqk9kpzavbj3mat, *fpdlcqk9lncwkfq7, *fpdlcqk9yxiwebc5,
*fpdlcqk9atujnxb8;
double *wkumc9idyxiwebc5;
double *wkumc9idlxyst1eb, *wkumc9idzyodca3j;
- double *wkumc9idhdnw2fts, *wkumc9idwbkq9zyi;
double *wkumc9iddev0;
wkumc9idyxiwebc5 = Calloc(*ftnjamu2 * xwdf5ltg , double);
@@ -2430,8 +2499,8 @@ void vdcao6(double lncwkfq7[], double tlgduey8[], double ufgqj9ck[],
idlosrw8 = xui7hqwl[ 4];
vtsou9pz = xui7hqwl[11];
- exrkcn5d = xui7hqwl[12];
- wr0lbopv = xui7hqwl[17];
+
+
fpdlcqk9lncwkfq7 = lncwkfq7;
fpdlcqk9yxiwebc5 = wkumc9idyxiwebc5;
diff --git a/src/gautr.c b/src/gautr.c
index c4fae28..ef21705 100644
--- a/src/gautr.c
+++ b/src/gautr.c
@@ -322,7 +322,7 @@ main()
for(i = -9; i<=9; i++)
{
r = i / 10.0;
- printf("%10.2f %10.6f \n",r,bivnor(x,y,r));
+ Rprintf("%10.2f %10.6f \n",r,bivnor(x,y,r));
}
diff --git a/src/lms.f b/src/lms.f
index 05953c2..9ad9072 100644
--- a/src/lms.f
+++ b/src/lms.f
@@ -81,7 +81,8 @@
double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4),
&lfu2qhid
integer hbsl0gto, itwo2
- double precision psi, pim12, tad5vhsu(3), two12
+ double precision psi, pim12, two12
+ double precision tad5vhsu(3)
itwo2 = 2
hbsl0gto = 1
if(.not.(lenkpzavbj3mat .gt. 0))goto 23016
@@ -104,7 +105,8 @@
double precision ghz9vuba, i9mwnvqt, mymu, sigma, kpzavbj3mat(4),
&lfu2qhid
integer hbsl0gto, itwo2
- double precision psi, mtpim12, tad5vhsu(3), two12
+ double precision psi, mtpim12, two12
+ double precision tad5vhsu(3)
itwo2 = 2
hbsl0gto = 1
if(.not.(lenkpzavbj3mat .gt. 0))goto 23018
diff --git a/src/muxr.c b/src/muxr.c
index 2f50c92..4ad6d97 100644
--- a/src/muxr.c
+++ b/src/muxr.c
@@ -1,11 +1,67 @@
/*
This code is
-Copyright (C) 1998-2005 T. W. Yee, University of Auckland. All rights reserved.
+Copyright (C) 1998-2012 T. W. Yee, University of Auckland. All rights reserved.
*/
-#include <stdio.h>
-#include <math.h>
+#include<math.h>
+#include<stdio.h>
+#include<stdlib.h>
+#include<R.h>
+#include<Rmath.h>
+
+
+void vdec(int *row_index, int *col_index, int *dimm);
+void m2a(double *m, double *a, int *dimm, int *row_index,
+ int *col_index, int *n, int *M, int *upper);
+void a2m(double *a, double *m, int *dimm, int *row_index,
+ int *col_index, int *n, int *M);
+void mux2(double *cc, double *ymat,
+ double *ans, int *p, int *n, int *M);
+void mux22(double *cc, double *ymat, double *ans, int *dimm,
+ int *row_index, int *col_index,
+ int *n, int *M, double *wk, int *upper);
+void mux5(double *cc, double *x,
+ double *ans, int *M, int *n, int *r,
+ int *dimm,
+ int *dimr,
+ int *matrix,
+ double *wk, double *wk2,
+ int *row_index_M, int *col_index_M,
+ int *row_index_r, int *col_index_r);
+void mux55(double *evects, double *evals, double *ans, double *wk,
+ double *wk2, int *row_index, int *col_index,
+ int *M, int *n);
+void mux7(double *cc, double *x,
+ double *ans, int *M, int *q, int *n, int *r);
+void mux111(double *cc, double *txmat, int *M, int *R, int *n,
+ double *wk, double *wk2, int *row_index, int *col_index,
+ int *dimm, int *upper);
+void mux15(double *cc, double *x,
+ double *ans, int *M, int *n);
+void vchol(double *cc, int *M, int *n, int *ok, double *wk,
+ int *row_index, int *col_index, int *dimm);
+void vforsub(double *cc, double *b, int *M, int *n,
+ double *wk, int *row_index,
+ int *col_index, int *dimm);
+void vbacksub(double *cc, double *b, int *M, int *n,
+ double *wk, int *row_index,
+ int *col_index, int *dimm);
+void tapplymat1(double *mat, int *nr, int *nc, int *type);
+
+
+
+
+
+
+
+
+
+
+
+
+
+
void vdec(int *row_index, int *col_index, int *dimm)
@@ -128,7 +184,7 @@ void mux5(double *cc, double *x,
pd2 = wk2;
} else {
/* Commented out on 2/5/06. Need to fix this up more cleanly.
- printf("Error: can only handle matrix.arg == 1\n");
+ Rprintf("Error: can only handle matrix.arg == 1\n");
exit(-1);
*/
@@ -435,12 +491,12 @@ void tapplymat1(double *mat, int *nr, int *nc, int *type)
double *pd = mat, *pd2 = mat + *nr;
int i, j;
- if(*type==1)
+ if(*type == 1)
for(j = 2; j <= *nc; j++)
for(i = 0; i < *nr; i++, pd2++)
*pd2 += *pd++;
- if(*type==2)
+ if(*type == 2)
{
pd2 = mat + *nr * *nc - 1;
pd = pd2 - *nr;
@@ -449,12 +505,12 @@ void tapplymat1(double *mat, int *nr, int *nc, int *type)
*pd2 -= *pd--;
}
- if(*type==3)
+ if(*type == 3)
for(j = 2; j <= *nc; j++)
for(i = 0; i < *nr; i++, pd2++)
*pd2 *= *pd++;
if(*type < 1 || *type > 3)
- printf("Error: *type not matched\n");
+ Rprintf("Error: *type not matched\n");
}
diff --git a/src/rgam3.c b/src/rgam3.c
index 9c0be8b..ae35250 100644
--- a/src/rgam3.c
+++ b/src/rgam3.c
@@ -135,6 +135,19 @@ void n5aioudkhbzuprs6(double *qgnl3toc,
double yjpnro8d = 8.0e88, bk3ymcih = 0.0e0,
*qcpiaj7f, qcpiaj7f0 = 0.0;
+
+
+ g2dnwteb = bk3ymcih;
+ bk3ymcih += bk3ymcih;
+ bk3ymcih *= bk3ymcih;
+ bk3ymcih += g2dnwteb;
+
+
+
+
+
+
+
qcpiaj7f = &qcpiaj7f0,
diff --git a/src/vgam3.c b/src/vgam3.c
index 52050c5..39f877b 100644
--- a/src/vgam3.c
+++ b/src/vgam3.c
@@ -1435,7 +1435,7 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
double *ghdetj8v, *zpcqv3uj;
- int nhja0izq, rutyk8mg, xjc4ywlh, lyma1kwc, lyzoe1rsp, ueb8hndv, gtrlbz3e, algpft4y = 0,
+ int nhja0izq, rutyk8mg, xjc4ywlh, lyzoe1rsp, ueb8hndv, gtrlbz3e, algpft4y = 0,
qemj9asg, npjlv3mr, kvowz9ht, ldk, fbd5yktj = 0;
@@ -1446,6 +1446,7 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
+
int *ftnjamu2, *wy1vqfzu;
int itdcb8ilk[1];
double tdcb8ilk[4];
@@ -1465,7 +1466,6 @@ void Yee_vbfa(int psdvgce3[], double *fjcasv7g, double he7mqnvy[], double tlgdue
rutyk8mg = psdvgce3[8];
xjc4ywlh = psdvgce3[9];
- lyma1kwc = psdvgce3[10];
kvowz9ht = psdvgce3[11];
npjlv3mr = psdvgce3[12];
ldk = psdvgce3[14];
diff --git a/src/vmux3.c b/src/vmux3.c
index 90a37c2..58ab442 100644
--- a/src/vmux3.c
+++ b/src/vmux3.c
@@ -137,7 +137,7 @@ void fvlmz9iyC_nudh6szq(double wpuarq2m[], double tlgduey8[], double bzmd6ftvmat
int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7;
double q6zdcwxk;
- double *wkumc9idwk12, *qnwamo0e;
+ double *wkumc9idwk12;
wkumc9idwk12 = Calloc(zyojx5hw, double);
wkumc9idtgiyxdw1 = Calloc(imk5wjxg, int);
@@ -174,7 +174,7 @@ void fvlmz9iyC_vbks(double wpuarq2m[], double unvxka0m[],
zyojx5hw = *wy1vqfzu * *wy1vqfzu,
imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2;
int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7;
- double q6zdcwxk, *qnwamo0e;
+ double q6zdcwxk;
double *wkumc9idwk12;
wkumc9idwk12 = Calloc(zyojx5hw , double);
@@ -278,7 +278,7 @@ void fvlmz9iyC_mxrbkut0(double wpuarq2m[], double he7mqnvy[],
double q6zdcwxk;
int ayfnwr1v, yq6lorbx, gp1jxzuh, bpvaqm5z;
- double *wkumc9idwk12, *wkumc9idwk34, *qnwamo0e;
+ double *wkumc9idwk12, *wkumc9idwk34;
int *wkumc9idtgiyxdw1, *wkumc9iddufozmt7,
imk5wjxg = *wy1vqfzu * (*wy1vqfzu + 1) / 2,
zyojx5hw = *wy1vqfzu * *wy1vqfzu,
@@ -331,7 +331,7 @@ void fvlmz9iyC_lkhnw9yq(double wpuarq2m[], double ks3wejcv[],
int ayfnwr1v, yq6lorbx, gp1jxzuh, uaoynef0,
zyojx5hw = *wy1vqfzu * *wy1vqfzu;
- double q6zdcwxk, *qnwamo0e, vn3iasxugno = 1.0e-14;
+ double q6zdcwxk, vn3iasxugno = 1.0e-14;
double *wkumc9idwrk;
wkumc9idwrk = Calloc(zyojx5hw, double);
@@ -402,7 +402,20 @@ void fvlmz9iyC_enbin9(double bzmd6ftvmat[], double hdqsx7bk[], double nm0eljqk[]
int ayfnwr1v, kij0gwer, esql7umk;
double vjz5sxty, pvcjl2na, mwuvskg1, btiehdm2 = 100.0e0 * *rsynp1go,
- ydb, ft3ijqmy, bk3ymcih, q6zdcwxk, plo6hkdr, csi9ydge, oxjgzv0e = 0.001e0;
+ ydb, ft3ijqmy, q6zdcwxk, plo6hkdr, csi9ydge, oxjgzv0e = 0.001e0;
+ double bk3ymcih = -1.0;
+
+
+
+
+ csi9ydge = bk3ymcih;
+ bk3ymcih += bk3ymcih;
+ bk3ymcih += csi9ydge;
+
+
+
+
+
if (*n2kersmx <= 0.80e0 || *n2kersmx >= 1.0e0) {
Rprintf("Error in fvlmz9iyC_enbin9: bad n2kersmx value.\n");
@@ -484,10 +497,9 @@ void fvlmz9iyC_enbin8(double bzmd6ftvmat[], double hdqsx7bk[], double hsj9bzaq[]
- int ayfnwr1v, kij0gwer, esql7umk = 3000;
+ int ayfnwr1v, kij0gwer;
double ft3ijqmy, tad5vhsu, o3jyipdf, pq0hfucn, q6zdcwxk,
- plo6hkdr, qtce8hzo1 = 0.0e0, qtce8hzo2 = 0.0e0,
- hnu1vjyw = 1.0e0 - *rsynp1go;
+ plo6hkdr, qtce8hzo1 = 0.0e0, qtce8hzo2 = 0.0e0;
int fw2rodat, rx8qfndg, mqudbv4y;
double onemse, nm0eljqk, ydb, btiehdm2 = -100.0 * *rsynp1go,
kbig = 1.0e4, oxjgzv0e = 0.0010;
@@ -652,7 +664,7 @@ void VGAM_C_mux34(double he7mqnvy[], double Dmat[], int *vnc1izfy, int *e0nmabdk
int ayfnwr1v, yq6lorbx, gp1jxzuh;
- double *qnwamo0e1, *qnwamo0e2, *qnwamo0e3;
+ double *qnwamo0e1, *qnwamo0e2;
if (*e0nmabdk == 1) {
qnwamo0e1 = bqelz3cy; qnwamo0e2 = he7mqnvy;
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list