[r-cran-vgam] 31/63: Import Upstream version 0.9-0
Andreas Tille
tille at debian.org
Tue Jan 24 13:54:30 UTC 2017
This is an automated email from the git hooks/post-receive script.
tille pushed a commit to branch master
in repository r-cran-vgam.
commit 85cf7bbb8349d8c8ef7e43ea255f70a5fae6758d
Author: Andreas Tille <tille at debian.org>
Date: Tue Jan 24 14:16:56 2017 +0100
Import Upstream version 0.9-0
---
DESCRIPTION | 8 +-
MD5 | 829 +--
NAMESPACE | 29 +-
NEWS | 87 +-
R/Links.R | 236 +
R/aamethods.q | 29 +-
R/coef.vlm.q | 123 +-
R/deviance.vlm.q | 100 +-
R/family.actuary.R | 2090 +++++++
R/family.aunivariate.R | 1621 +++--
R/family.basics.R | 185 +-
R/family.binomial.R | 2246 +++----
R/family.bivariate.R | 2634 ++++----
R/family.categorical.R | 1071 ++--
R/family.censored.R | 1263 ++--
R/family.circular.R | 590 +-
R/family.exp.R | 64 +-
R/family.extremes.R | 2855 +++++----
R/family.genetic.R | 935 +--
R/family.glmgam.R | 1603 ++---
R/family.loglin.R | 487 +-
R/family.mixture.R | 1132 ++--
R/family.nonlinear.R | 181 +-
R/family.normal.R | 2545 +++++---
R/family.others.R | 1398 ++---
R/family.positive.R | 448 +-
R/family.qreg.R | 5877 +++++++++---------
R/family.quantal.R | 46 +-
R/{family.rcam.R => family.rcim.R} | 400 +-
R/family.rcqo.R | 106 +-
R/family.robust.R | 288 +-
R/family.rrr.R | 24 +-
R/family.survival.R | 87 +-
R/family.ts.R | 133 +-
R/family.univariate.R | 11492 +++++++++++++++++++----------------
R/family.zeroinf.R | 954 +--
R/links.q | 2042 ++++---
R/logLik.vlm.q | 16 +-
R/model.matrix.vglm.q | 59 +-
R/nobs.R | 14 +-
R/plot.vglm.q | 69 +-
R/predict.vgam.q | 49 +-
R/predict.vglm.q | 134 +-
R/predict.vlm.q | 8 +-
R/qtplot.q | 165 +-
R/residuals.vlm.q | 108 +-
R/rrvglm.control.q | 37 +-
R/s.vam.q | 60 +-
R/smart.R | 18 +-
R/summary.vglm.q | 188 +-
R/vsmooth.spline.q | 23 +-
data/alclevels.rda | Bin 567 -> 551 bytes
data/alcoff.rda | Bin 563 -> 546 bytes
data/auuc.rda | Bin 245 -> 246 bytes
data/backPain.rda | Bin 487 -> 484 bytes
data/car.all.rda | Bin 6979 -> 6961 bytes
data/crashbc.rda | Bin 392 -> 375 bytes
data/crashf.rda | Bin 358 -> 341 bytes
data/crashi.rda | Bin 508 -> 491 bytes
data/crashmc.rda | Bin 401 -> 386 bytes
data/crashp.rda | Bin 393 -> 376 bytes
data/crashtr.rda | Bin 379 -> 362 bytes
data/crime.us.rda | Bin 3976 -> 3976 bytes
data/datalist | 1 +
data/fibre15.rda | Bin 247 -> 247 bytes
data/fibre1dot5.rda | Bin 296 -> 298 bytes
data/finney44.rda | Bin 209 -> 210 bytes
data/gala.rda | Bin 1051 -> 1052 bytes
data/hormone.txt.bz2 | Bin 0 -> 342 bytes
data/hspider.rda | Bin 1344 -> 1344 bytes
data/hued.rda | Bin 414 -> 415 bytes
data/huie.rda | Bin 418 -> 419 bytes
data/huse.rda | Bin 324 -> 324 bytes
data/leukemia.rda | Bin 329 -> 329 bytes
data/marital.nz.rda | Bin 10504 -> 10504 bytes
data/mmt.rda | Bin 4222 -> 4222 bytes
data/pneumo.rda | Bin 267 -> 268 bytes
data/rainfall.rda | Bin 11063 -> 11062 bytes
data/ruge.rda | Bin 257 -> 258 bytes
data/toxop.rda | Bin 473 -> 474 bytes
data/ugss.rda | Bin 11579 -> 11588 bytes
data/venice.rda | Bin 976 -> 983 bytes
data/venice90.rda | Bin 8072 -> 8068 bytes
data/wffc.indiv.rda | Bin 2570 -> 2565 bytes
data/wffc.nc.rda | Bin 4244 -> 4292 bytes
data/wffc.rda | Bin 10253 -> 10236 bytes
data/wffc.teams.rda | Bin 541 -> 542 bytes
data/xs.nz.rda | Bin 221580 -> 221524 bytes
inst/doc/categoricalVGAM.Rnw | 46 +-
inst/doc/categoricalVGAM.pdf | Bin 677833 -> 678663 bytes
man/AA.Aa.aa.Rd | 7 +-
man/AB.Ab.aB.ab.Rd | 7 +-
man/AB.Ab.aB.ab2.Rd | 7 +-
man/ABO.Rd | 7 +-
man/CommonVGAMffArguments.Rd | 130 +-
man/G1G2G3.Rd | 17 +-
man/Inv.gaussian.Rd | 2 +-
man/Links.Rd | 157 +-
man/MNSs.Rd | 21 +-
man/Max.Rd | 32 +-
man/Pareto.Rd | 24 +-
man/Qvar.Rd | 18 +-
man/Rcam.Rd | 27 +-
man/Tol.Rd | 37 +-
man/VGAM-package.Rd | 87 +-
man/acat.Rd | 20 +-
man/alaplace3.Rd | 25 +-
man/alaplaceUC.Rd | 2 +-
man/amh.Rd | 8 +-
man/amhUC.Rd | 2 +-
man/amlbinomial.Rd | 5 +-
man/amlexponential.Rd | 4 +-
man/amlnormal.Rd | 4 +-
man/amlpoisson.Rd | 4 +-
man/backPain.Rd | 3 +
man/benini.Rd | 53 +-
man/beta.ab.Rd | 19 +-
man/betaII.Rd | 6 -
man/betabinomial.Rd | 7 +-
man/betabinomial.ab.Rd | 7 +-
man/betaff.Rd | 72 +-
man/betageometric.Rd | 6 -
man/betanormUC.Rd | 9 +-
man/betaprime.Rd | 40 +-
man/bilogistic4.Rd | 4 +-
man/binom2.or.Rd | 6 -
man/binom2.rho.Rd | 9 +-
man/binom2.rhoUC.Rd | 28 +-
man/binomialff.Rd | 33 +-
man/binormal.Rd | 29 +-
man/bisa.Rd | 6 -
man/bisaUC.Rd | 8 +-
man/borel.tanner.Rd | 8 +-
man/bortUC.Rd | 8 +-
man/brat.Rd | 36 +-
man/bratt.Rd | 28 +-
man/cardUC.Rd | 2 +-
man/cardioid.Rd | 18 +-
man/cauchit.Rd | 57 +-
man/cauchy.Rd | 27 +-
man/ccoef.Rd | 15 +-
man/cdf.lmscreg.Rd | 25 +-
man/cennormal1.Rd | 33 +-
man/cenpoisson.Rd | 12 +-
man/cgo.Rd | 5 +
man/cgumbel.Rd | 9 +-
man/chinese.nz.Rd | 35 +-
man/chisq.Rd | 8 +-
man/cloglog.Rd | 80 +-
man/constraints.Rd | 41 +-
man/cqo.Rd | 136 +-
man/crashes.Rd | 6 +-
man/cratio.Rd | 11 +-
man/cumulative.Rd | 53 +-
man/dagum.Rd | 7 +-
man/dcennormal1.Rd | 5 +-
man/dexpbinomial.Rd | 84 +-
man/df.residual.Rd | 43 +-
man/dirichlet.Rd | 22 +-
man/dirmul.old.Rd | 16 +-
man/dirmultinomial.Rd | 38 +-
man/eexpUC.Rd | 6 +-
man/enzyme.Rd | 8 +-
man/erf.Rd | 1 +
man/erlang.Rd | 27 +-
man/eunifUC.Rd | 18 +-
man/expexp.Rd | 24 +-
man/expexp1.Rd | 30 +-
man/expgeometric.Rd | 8 +-
man/explink.Rd | 40 +-
man/explogarithmic.Rd | 9 +-
man/exponential.Rd | 18 +-
man/exppoisson.Rd | 10 +-
man/felix.Rd | 16 +-
man/felixUC.Rd | 8 +-
man/fff.Rd | 57 +-
man/fgm.Rd | 7 +-
man/fisherz.Rd | 69 +-
man/fisk.Rd | 15 +-
man/fiskUC.Rd | 1 +
man/fittedvlm.Rd | 43 +-
man/fnormUC.Rd | 36 +-
man/fnormal1.Rd | 41 +-
man/frank.Rd | 29 +-
man/frankUC.Rd | 25 +-
man/frechet.Rd | 29 +-
man/freund61.Rd | 7 +-
man/fsqrt.Rd | 40 +-
man/gamma1.Rd | 32 +-
man/gamma2.Rd | 29 +-
man/gamma2.ab.Rd | 6 -
man/gammahyp.Rd | 16 +-
man/garma.Rd | 49 +-
man/gaussianff.Rd | 22 +-
man/genbetaII.Rd | 15 +-
man/gengamma.Rd | 36 +-
man/genpoisson.Rd | 20 +-
man/genrayleigh.Rd | 9 +-
man/geometric.Rd | 34 +-
man/gev.Rd | 55 +-
man/gevUC.Rd | 6 +-
man/golf.Rd | 88 +-
man/gompertz.Rd | 133 +
man/gompertzUC.Rd | 78 +
man/gpd.Rd | 54 +-
man/gpdUC.Rd | 4 +-
man/grc.Rd | 52 +-
man/gumbel.Rd | 54 +-
man/gumbelII.Rd | 149 +
man/gumbelIIUC.Rd | 77 +
man/gumbelIbiv.Rd | 23 +-
man/gumbelUC.Rd | 37 +-
man/guplot.Rd | 6 +-
man/hormone.Rd | 119 +
man/hspider.Rd | 14 +-
man/huber.Rd | 23 +-
man/huberUC.Rd | 8 +-
man/huggins91.Rd | 23 +-
man/huggins91UC.Rd | 20 +-
man/hyperg.Rd | 27 +-
man/hypersecant.Rd | 26 +-
man/hzeta.Rd | 13 +-
man/hzetaUC.Rd | 12 +-
man/iam.Rd | 17 +-
man/identity.Rd | 44 +-
man/inv.gaussianff.Rd | 16 +-
man/invbinomial.Rd | 27 +-
man/invlomax.Rd | 14 +-
man/invparalogistic.Rd | 14 +-
man/invparalogisticUC.Rd | 6 +-
man/is.parallel.Rd | 69 +
man/is.zero.Rd | 63 +
man/koenker.Rd | 19 +-
man/koenkerUC.Rd | 24 +-
man/kumar.Rd | 13 +-
man/kumarUC.Rd | 2 +-
man/lambertW.Rd | 9 +
man/laplace.Rd | 36 +-
man/laplaceUC.Rd | 4 +-
man/leipnik.Rd | 31 +-
man/lerch.Rd | 25 +-
man/leukemia.Rd | 4 +
man/levy.Rd | 40 +-
man/lgammaUC.Rd | 7 +-
man/lgammaff.Rd | 20 +-
man/lindUC.Rd | 69 +
man/lindley.Rd | 90 +
man/lino.Rd | 18 +-
man/linoUC.Rd | 49 +-
man/lirat.Rd | 6 +
man/lms.bcg.Rd | 24 +-
man/lms.bcn.Rd | 70 +-
man/lms.yjn.Rd | 23 +-
man/logUC.Rd | 7 +-
man/logc.Rd | 44 +-
man/loge.Rd | 35 +-
man/logff.Rd | 37 +-
man/logistic.Rd | 35 +-
man/logit.Rd | 95 +-
man/loglapUC.Rd | 50 +-
man/loglaplace.Rd | 53 +-
man/loglinb2.Rd | 18 +-
man/loglinb3.Rd | 2 +-
man/loglog.Rd | 41 +-
man/lognormal.Rd | 23 +-
man/logoff.Rd | 47 +-
man/lomax.Rd | 15 +-
man/lomaxUC.Rd | 4 +-
man/lqnorm.Rd | 44 +-
man/lrtest.Rd | 8 +-
man/lv.Rd | 23 +-
man/lvplot.Rd | 22 +-
man/lvplot.qrrvglm.Rd | 50 +-
man/lvplot.rrvglm.Rd | 16 +-
man/makeham.Rd | 158 +
man/makehamUC.Rd | 97 +
man/margeff.Rd | 24 +-
man/maxwell.Rd | 15 +-
man/maxwellUC.Rd | 2 +-
man/mbinomial.Rd | 35 +-
man/mccullagh89.Rd | 13 +-
man/micmen.Rd | 23 +-
man/mix2exp.Rd | 26 +-
man/mix2normal1.Rd | 53 +-
man/mix2poisson.Rd | 46 +-
man/mlogit.Rd | 107 +
man/model.framevlm.Rd | 28 +-
man/model.matrixvlm.Rd | 18 +-
man/moffset.Rd | 26 +-
man/morgenstern.Rd | 15 +-
man/multinomial.Rd | 62 +-
man/nakagami.Rd | 21 +-
man/nakagamiUC.Rd | 6 +-
man/nbcanlink.Rd | 62 +-
man/nbolf.Rd | 73 +-
man/negbinomial.Rd | 36 +-
man/negbinomial.size.Rd | 20 +-
man/normal1.Rd | 26 +-
man/notdocumentedyet.Rd | 42 +-
man/ordpoisson.Rd | 44 +-
man/oxtemp.Rd | 2 +-
man/paralogistic.Rd | 17 +-
man/paralogisticUC.Rd | 7 +-
man/pareto1.Rd | 34 +-
man/paretoIV.Rd | 16 +-
man/paretoIVUC.Rd | 58 +-
man/perks.Rd | 143 +
man/perksUC.Rd | 76 +
man/persp.qrrvglm.Rd | 20 +-
man/plackUC.Rd | 10 +-
man/plackett.Rd | 12 +-
man/plotdeplot.lmscreg.Rd | 2 +-
man/plotqrrvglm.Rd | 26 +-
man/plotqtplot.lmscreg.Rd | 2 +-
man/{plotrcam0.Rd => plotrcim0.Rd} | 34 +-
man/poissonff.Rd | 46 +-
man/poissonp.Rd | 21 +-
man/polf.Rd | 83 +-
man/polonoUC.Rd | 4 +-
man/posbinomUC.Rd | 35 +-
man/posbinomial.Rd | 21 +-
man/posnegbinUC.Rd | 6 +-
man/posnegbinomial.Rd | 44 +-
man/posnormUC.Rd | 13 +-
man/posnormal1.Rd | 30 +-
man/pospoisson.Rd | 30 +-
man/powl.Rd | 56 +-
man/predictvglm.Rd | 99 +-
man/prentice74.Rd | 20 +-
man/probit.Rd | 47 +-
man/propodds.Rd | 16 +-
man/prplot.Rd | 8 +-
man/qrrvglm.control.Rd | 23 +-
man/qtplot.gumbel.Rd | 71 +-
man/qtplot.lmscreg.Rd | 4 +-
man/quasibinomialff.Rd | 52 +-
man/quasipoissonff.Rd | 29 +-
man/rayleigh.Rd | 27 +-
man/rayleighUC.Rd | 10 +-
man/rcqo.Rd | 49 +-
man/rdiric.Rd | 6 +-
man/recexp1.Rd | 10 +-
man/reciprocal.Rd | 59 +-
man/recnormal1.Rd | 8 +-
man/rhobit.Rd | 68 +-
man/riceUC.Rd | 18 +-
man/riceff.Rd | 26 +-
man/rig.Rd | 16 +-
man/rlplot.egev.Rd | 8 +-
man/rrar.Rd | 12 +-
man/rrvglm.Rd | 30 +-
man/rrvglm.control.Rd | 24 +-
man/rrvglm.optim.control.Rd | 7 +-
man/ruge.Rd | 8 +-
man/s.Rd | 22 +-
man/seq2binomial.Rd | 31 +-
man/simplex.Rd | 8 +-
man/simplexUC.Rd | 16 +-
man/sinmad.Rd | 13 +-
man/sinmadUC.Rd | 6 +-
man/skellam.Rd | 40 +-
man/skellamUC.Rd | 16 +-
man/skewnormal1.Rd | 19 +-
man/slash.Rd | 30 +-
man/slashUC.Rd | 20 +-
man/snormUC.Rd | 5 +-
man/sratio.Rd | 17 +-
man/studentt.Rd | 21 +-
man/tikuv.Rd | 23 +-
man/tikuvUC.Rd | 9 +-
man/tobit.Rd | 109 +-
man/tobitUC.Rd | 10 +-
man/toxop.Rd | 4 +
man/tparetoUC.Rd | 12 +-
man/triangle.Rd | 23 +-
man/triangleUC.Rd | 8 +-
man/trplot.Rd | 23 +-
man/trplot.qrrvglm.Rd | 47 +-
man/undocumented-methods.Rd | 35 +-
man/uqo.Rd | 31 +-
man/venice.Rd | 6 +-
man/vgam-class.Rd | 4 +-
man/vgam.Rd | 94 +-
man/vgam.control.Rd | 2 +-
man/vglm-class.Rd | 15 +-
man/vglm.Rd | 76 +-
man/vglm.control.Rd | 67 +-
man/vglmff-class.Rd | 10 +-
man/vonmises.Rd | 26 +-
man/vsmooth.spline.Rd | 38 +-
man/waitakere.Rd | 4 +-
man/wald.Rd | 7 +-
man/weibull.Rd | 75 +-
man/weightsvglm.Rd | 25 +-
man/wffc.P2star.Rd | 2 +-
man/wffc.Rd | 20 +-
man/wffc.indiv.Rd | 2 +
man/xs.nz.Rd | 8 +-
man/yeo.johnson.Rd | 10 +-
man/yip88.Rd | 32 +-
man/yulesimon.Rd | 29 +-
man/yulesimonUC.Rd | 2 +-
man/zabinomUC.Rd | 4 +-
man/zabinomial.Rd | 10 +-
man/zageomUC.Rd | 4 +-
man/zageometric.Rd | 17 +-
man/zanegbinUC.Rd | 2 +-
man/zanegbinomial.Rd | 18 +-
man/zapoisUC.Rd | 8 +-
man/zapoisson.Rd | 15 +-
man/zero.Rd | 3 +-
man/zeta.Rd | 16 +-
man/zetaff.Rd | 21 +-
man/zibinomUC.Rd | 10 +-
man/zibinomial.Rd | 28 +-
man/zigeomUC.Rd | 7 +-
man/zigeometric.Rd | 43 +-
man/zinegbinUC.Rd | 5 +-
man/zinegbinomial.Rd | 20 +-
man/zipebcom.Rd | 30 +-
man/zipf.Rd | 19 +-
man/zipfUC.Rd | 6 +-
man/zipoisUC.Rd | 10 +-
man/zipoisson.Rd | 37 +-
424 files changed, 33052 insertions(+), 23727 deletions(-)
diff --git a/DESCRIPTION b/DESCRIPTION
index 854527e..83d1374 100755
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: VGAM
-Version: 0.8-7
-Date: 2012-04-13
+Version: 0.9-0
+Date: 2012-09-01
Title: Vector Generalized Linear and Additive Models
Author: Thomas W. Yee <t.yee at auckland.ac.nz>
Maintainer: Thomas Yee <t.yee at auckland.ac.nz>
@@ -17,6 +17,6 @@ Imports: methods, stats, stats4
URL: http://www.stat.auckland.ac.nz/~yee/VGAM
LazyLoad: yes
LazyData: yes
-Packaged: 2012-04-12 20:01:03 UTC; tyee001
+Packaged: 2012-09-01 04:39:09 UTC; tyee001
Repository: CRAN
-Date/Publication: 2012-04-13 10:59:27
+Date/Publication: 2012-09-01 05:55:16
diff --git a/MD5 b/MD5
index 7bbeab4..579f1ea 100644
--- a/MD5
+++ b/MD5
@@ -1,9 +1,10 @@
60b13c57b66bb77e65321c5c0a3b1dab *BUGS
-26f074100689286861a47e26ab4b79cc *DESCRIPTION
+eba6e49209343f83cd62777ae45f40b8 *DESCRIPTION
dd959d3a0cd680792122813a7d58d506 *DISCLAIMER
-322232b1f64840d3d3360872f22c9adc *NAMESPACE
-7458e7dbf78a957471b691b3b11bf6ab *NEWS
-dd21b3270922941c5f772fcbc0fdbb53 *R/aamethods.q
+8d0d88feac7fa558a6ec04249d6b4feb *NAMESPACE
+4a7690a3913431a23fbef73c99a7ebba *NEWS
+555d62a4356133367fbd0d49ed5d19d1 *R/Links.R
+ca2f5b0e1d1e0c4bb984a82a7d837495 *R/aamethods.q
b3239d3ffdffe1de9581354b05d66016 *R/add1.vglm.q
95904ca107a7d51b5a7a6603304e962b *R/attrassign.R
fa7ea29619a00514f855a075070edae7 *R/bAIC.q
@@ -11,69 +12,70 @@ fa7ea29619a00514f855a075070edae7 *R/bAIC.q
56bed730c52b3d44ff151e0b6db57be6 *R/calibrate.q
b09327ef1094ac0ff74c3283155ea3fb *R/cao.R
0bdd385d0a1232292595d702d2ae167d *R/cao.fit.q
-9e68dd0424ac75874e253c22099ce952 *R/coef.vlm.q
+714327842f7526ae1527e3c2f8cd3a9b *R/coef.vlm.q
a3f5ad1bd124d07a69c74b7f917a9a58 *R/cqo.R
e94fae353c86c2ece43792e9c2f777a0 *R/cqo.fit.q
-6d76c673deadaf7f4e502bc435292331 *R/deviance.vlm.q
+211da72b82e2edb1271019a0db41ae12 *R/deviance.vlm.q
80861c2c2454b9c298e11cbadc431056 *R/effects.vglm.q
-472f0fb8bc4a8adbc9cce363058fbfc7 *R/family.aunivariate.R
-1c8fd0fe2496973ee5916e33de041cd4 *R/family.basics.R
-d66f94ecff21e65dbd27417f0b485a47 *R/family.binomial.R
-37abad6c4eaa05fea096bb1f4e3da8cc *R/family.bivariate.R
-ea284e385d89be09aa051fb36c70f785 *R/family.categorical.R
-aa067e1804876a7cb4b091aa328af510 *R/family.censored.R
-ba2f3c8fe4b2e8ba26973027fa2d676d *R/family.circular.R
-0a25502954db9644a4a2aea9e28992c4 *R/family.exp.R
-b184220a0cd72dac2a00bb463b0cc0a5 *R/family.extremes.R
+6ec5fa8424b3096f84725878bb303296 *R/family.actuary.R
+cbed3db74d49ab7d24f8c07737b186b3 *R/family.aunivariate.R
+ac6e4976d5c319558a3fbd2fb01f5c58 *R/family.basics.R
+36efe4bc416dca7ab18b96dcc56f67f8 *R/family.binomial.R
+c4d71b12cf141977e5ea1f14f362bac1 *R/family.bivariate.R
+49ac6615afe7a8da3388392f8136ee9e *R/family.categorical.R
+3a8cbb695a8700fd26a6e9de3a2920b7 *R/family.censored.R
+63d90d55ae10db5f8fd4367b2fe3d775 *R/family.circular.R
+52f9dc7247202a2d76e69e7c213aa786 *R/family.exp.R
+8b339c2d1f2f274659ed53b5bb4b6cdd *R/family.extremes.R
938226f9a34e4f6fd80108a9a89c2651 *R/family.fishing.R
b0910b3f615575850e1417740dedaaa7 *R/family.functions.R
-1a2e09d03675bb6dff6e61cb2e7c76d5 *R/family.genetic.R
-ebe0608572becf749103b7737c7a7acc *R/family.glmgam.R
-3c3fabbb223815ee25a4a5c62c2e3c7b *R/family.loglin.R
+dd14928a169b772f96b78fe7126377fc *R/family.genetic.R
+efd7915c4bbd4ab239cd590422af74cb *R/family.glmgam.R
+d81d63f88573296e4241ffe8a87ee99d *R/family.loglin.R
e159913225b326c326e2c8bffde57dcc *R/family.math.R
-5bb0c3ac9343bbf891bfa432f93dea17 *R/family.mixture.R
-0fa3e3d6a3138cc8eac0f320a389ae11 *R/family.nonlinear.R
-55fc63680ad925f0e69e0fa3af97e2aa *R/family.normal.R
-9135d29542ca7103b3a9155b79f65f94 *R/family.others.R
-c60af8cd9158271bb3bf1ae0c8807b56 *R/family.positive.R
-a9ba39cf263d370afa214a38294b6e87 *R/family.qreg.R
-fdca4da3063c9acfb9c5abe845c333af *R/family.quantal.R
-b7b27b87e010b4a4bcd91a94a65476b9 *R/family.rcam.R
-f9ae840ae9e77833947050013e493a29 *R/family.rcqo.R
-edc9ff129ca91ba61a4f885131870024 *R/family.robust.R
-fe7dc2264950869720ee398b61893324 *R/family.rrr.R
-4b07bd955d32ceb26d12eb62ce121867 *R/family.survival.R
-c737f9809bc0727f6733906db4fc8f9e *R/family.ts.R
-fa0b72d0e2ea47b644e8571f0e7ecb6e *R/family.univariate.R
+cf1ef5c0906c17220d1fe0d061514d0d *R/family.mixture.R
+8c91b1d7a9cc6c3acf7d45aa19f6853c *R/family.nonlinear.R
+14985d0dbaed80504e0a3507afe5810a *R/family.normal.R
+d017890f91aeb1645df847674f904184 *R/family.others.R
+9b5c067f38b1b25476bc52165676e367 *R/family.positive.R
+bfd45858f4bec29d0deacf2b58a71e59 *R/family.qreg.R
+57ae7b23e7562be6d99fa29ecc61c234 *R/family.quantal.R
+e0b7d3dc700e001480e3d5b7d7790e40 *R/family.rcim.R
+1e4aeedf2b6062c72ee0844ed57fc5f2 *R/family.rcqo.R
+a7fc259a5e78902f612d8c5bcc0852e0 *R/family.robust.R
+27382145fd1f60de7726fba47e7952b8 *R/family.rrr.R
+955838f5c05811c822ef1db7b15fce4a *R/family.survival.R
+f8f2bc7b1d740b98b18c7de6915f7ff8 *R/family.ts.R
+8490fe238fa094ad63ae5db1d04cf83d *R/family.univariate.R
ba86e91c886455a19a223ab62d72910b *R/family.vglm.R
-d1f08f0da3445ebfbdc13a9a67f40b90 *R/family.zeroinf.R
+202dae68295c91f63a80dc2e342e1f77 *R/family.zeroinf.R
c4209518badc8f38d39cd2d7b8405d24 *R/fittedvlm.R
c6167af1886c3d0340b9b41a66f8d1a9 *R/formula.vlm.q
33aa96487bc94130897db4ca82ec9559 *R/generic.q
-204b9b2d8db1b10b17e96e798ea907b1 *R/links.q
-0aef958fdd7db1b20ee26c818807d2c1 *R/logLik.vlm.q
+646e478fc9a1c0635921e05b1b546f7a *R/links.q
+ec95b1084b5bba66c143c75ef5425268 *R/logLik.vlm.q
12c9c7e7246afe10034cdd60a90a73d0 *R/lrwaldtest.R
-2ad7539a7b037d7e542f7d90378f8591 *R/model.matrix.vglm.q
+f88c81e01b502e036f99c9d38d39e284 *R/model.matrix.vglm.q
76b26cdae089197c27a697d06ad16c30 *R/mux.q
-939ddbb40d567790aba7c2e0fbf84ad2 *R/nobs.R
-0cb5b755110ed2ada83d8621c94db5ee *R/plot.vglm.q
-01e3395c8bf121d1c2a648942397411c *R/predict.vgam.q
-b72b8be0e0cccf16319dd8faf03f90c7 *R/predict.vglm.q
-e8f63b7ca71b2dc7778c0ae10c13b70d *R/predict.vlm.q
+473fd0f09548cd41a4d126dc0f3498ce *R/nobs.R
+ca74849eea37277b840ca90fe843beb4 *R/plot.vglm.q
+225861a6f45f0b2efe3bd315f8a5aaff *R/predict.vgam.q
+21a0facdd8b7d12fd37eefcc65e7228a *R/predict.vglm.q
+c1ab98752f47716f978a9f1a406b1112 *R/predict.vlm.q
d56618bb580ea017562efbb27aa69ee5 *R/print.vglm.q
04a7a1cc3e40dc1b4e861e191e91edfd *R/print.vlm.q
9fb95687e7080c3b216ee7c79cb1be0a *R/qrrvglm.control.q
-5b4ac13de461e108659579b651cc8d09 *R/qtplot.q
-f9a4d1bd1d2eb0a93d46d6043909c01d *R/residuals.vlm.q
+865a789765db838345b85273ced9189f *R/qtplot.q
+cc43b350b8191fca4af2b1e969256a27 *R/residuals.vlm.q
144643456f5d88647df94b5421bd850a *R/rrvglm.R
-9029b2cf71b02f40d8b8d1424ee044e4 *R/rrvglm.control.q
+e1375a19876aca5ad23de2b5548223a0 *R/rrvglm.control.q
a64fe1a10cc52a121f1af8f410953e4c *R/rrvglm.fit.q
b7b95cdd6591250161f2c93699983466 *R/s.q
-b0970500631acb1935d4415e6d6054dc *R/s.vam.q
-fe6132afff206af8d15031d48a880a4d *R/smart.R
+8d57d9c6666bc912dc03123fb36b6908 *R/s.vam.q
+6dff9b78299e3ecf4e5087946d54cb95 *R/smart.R
7ce45be4048ac6023d1bfcd703f80391 *R/step.vglm.q
44fc620e4e847fee0a9ce1365f3ffd27 *R/summary.vgam.q
-729c7eafc395a92fb06276b8fcc064e6 *R/summary.vglm.q
+c2cfd1291178b694071730b2d53d02c2 *R/summary.vglm.q
242d83e29ebcdbebf30dd6e1b256aaa6 *R/summary.vlm.q
753dad5450557ae57cbff2cb4c229b3a *R/uqo.R
f223707b00020c752536ef95ea7150bb *R/vgam.R
@@ -85,58 +87,59 @@ f03cb94631bcfdccf01e1542fb0f976e *R/vglm.R
fb812b12aaf59ab251153fcc3482e556 *R/vglm.fit.q
38aeb51b3ed4d9a4a5f1af56da21b32b *R/vlm.R
e76f5e142ff6bc7ad92fc0eece93bb9d *R/vlm.wfit.q
-991035d00cfe83ca882204627e8c226f *R/vsmooth.spline.q
+f9c093d80ffab1851abc4459e35050d9 *R/vsmooth.spline.q
c1c2fce6995f00d9ec512f818662a7c1 *R/zzz.R
-b67ef2298d32cb077295be681423afb0 *data/alclevels.rda
-7429c1f9203204dadbebc711d77702e1 *data/alcoff.rda
-ef1b7340ee80284a710aa9928f0715c1 *data/auuc.rda
-19326343d8314f353c3191387bcf99f9 *data/backPain.rda
+f8995346c8c9f824505f62825d3afa0d *data/alclevels.rda
+8879c9b3cca96c907424391706f5bf56 *data/alcoff.rda
+ead10f7aec9214d93787e0f6cfa2c26f *data/auuc.rda
+3ca7268f4f9287b28bb45ce767611375 *data/backPain.rda
4fa3eac69a59ea5ed0123d54528e5595 *data/backPain.txt.gz
e039fd36c33b359830b2ac811ca7fc49 *data/bmi.nz.txt.xz
-845731a9561bc0b05164462304498662 *data/car.all.rda
+837a49777680ee5db5bc19d979b41352 *data/car.all.rda
b29c1a4125f0898885b0a723442d6a92 *data/chest.nz.txt.bz2
0f45f6779a3c3583f4edf68da4045509 *data/chinese.nz.txt.gz
3cb8bc8e1fc615416f0c8838a50b3f51 *data/coalminers.txt.gz
-58964029796c2023e5daedb35933dcf3 *data/crashbc.rda
-fca2a442510ee3bf511baafb954ea2fb *data/crashf.rda
-c8b9f611102c76691de2c8b7eef3e9e8 *data/crashi.rda
-642d9ec5d352c3147f2f508eb49ce511 *data/crashmc.rda
-b84768569fedb251d7a11828e5759829 *data/crashp.rda
-a4735d92081129344077aa4e2ba5897c *data/crashtr.rda
-8332de0e748c5a8b26e45a16b68a95a5 *data/crime.us.rda
-589b28f1ffeffcae40d7135ec24ac92c *data/datalist
+354e62c12a62b99b1f6c71dff0c67d7c *data/crashbc.rda
+7168dbd2899219a06842ad9245a9aa2f *data/crashf.rda
+205e81b515e761af9ea0926e84bbf34e *data/crashi.rda
+7d5d4a36296b2867eca6b14325d551a4 *data/crashmc.rda
+8352cfe8d6008c38cfee1ad1b3d431a6 *data/crashp.rda
+f8cdc2708b60ec3aa810dcbc57987727 *data/crashtr.rda
+9e9daede0c8ac4a0d94c0124db7c64dc *data/crime.us.rda
+5914585931c1d74e9ce75c859ecdaaab *data/datalist
08e87bb80a2364697b17ccec6260387c *data/enzyme.txt.gz
-ffdcf902fc068c144c21d6a16f8dd842 *data/fibre15.rda
-3000afab70d6fc680758f78b63ed237c *data/fibre1dot5.rda
-c6882bf02d56aa739aa6d983966ec0bb *data/finney44.rda
-8877f885aec9b36d566e89eab1967903 *data/gala.rda
+8a6e6874a36a0b0fce11b00f47ba5eec *data/fibre15.rda
+eacd8c708d059e1596542bf9fa188992 *data/fibre1dot5.rda
+e8b6cda757bcfc6ef70f39cae18000cb *data/finney44.rda
+f373bbea310609ccd4dba24d3b29310f *data/gala.rda
8508a1cb5a09b65616ed9dfe1fc7a7a9 *data/gew.txt.gz
bec512b2d2d680889c9b71c7b97dbffd *data/grain.us.txt.bz2
-3770872fdbf09060872e7f8f717b02ef *data/hspider.rda
-783ddd6effb518bdac2ae1dd7d2f82f0 *data/hued.rda
-be83d8afb5b70f433d01492faa009ebe *data/huie.rda
+9dcb8cdf026f5468fa70f8037fd72a0b *data/hormone.txt.bz2
+3f9fe2fafc59fbebe839018e2e2a9167 *data/hspider.rda
+66a528a02fc7bf76888cb436304e32c3 *data/hued.rda
+3224319b9eb26228a67fa1afddddbd21 *data/huie.rda
dffe21fbabf645127bccc3f3733098a7 *data/hunua.txt.bz2
-53a45bc0ea38d4709b7de98e128db620 *data/huse.rda
-129d2d5a1a0b299ea098bec1baff1129 *data/leukemia.rda
+a42b02a1b149d0f68924efe5a1677cfc *data/huse.rda
+e9f41116a56cb8b27abe4779cfe5edf9 *data/leukemia.rda
aba4885e0eeda8ee887a422fee01e02a *data/lirat.txt.gz
-74278085ea65524ef068214bdcc2bea7 *data/marital.nz.rda
-c13925d4856f9a209178ceff2dba4460 *data/mmt.rda
+978765fe2df9da5d12ac22f7b76d6033 *data/marital.nz.rda
+86c034950ba10d2f51df20c07d6d3599 *data/mmt.rda
1017612628ed904e97e5a426d307b16f *data/olympic.txt.gz
3ed63397c4a34f3233326ade6cfd1279 *data/oxtemp.txt.gz
-e3d97c5cee5ee7827697c89879103fa8 *data/pneumo.rda
-f1a96f02d1e62da318c96bfb05fc1306 *data/rainfall.rda
-2727500fb75423a5ced3144cfc858575 *data/ruge.rda
-bd8941dad8eb9beead509fa807dd4934 *data/toxop.rda
+42e0f4010aa801431f44233ca9f31bde *data/pneumo.rda
+cbfedcc4f17fd8a9b91e45254a0fd1bd *data/rainfall.rda
+21428fae28f282371d93975f1a57815e *data/ruge.rda
+c26f47d0d34a00f305777ffff683eefa *data/toxop.rda
1b059fc42c890bf89f2282298828d098 *data/ucberk.txt.gz
-232ee16be25487ae3ebd5752da883785 *data/ugss.rda
-7bd431745898cb1f7dcc804fe342a116 *data/venice.rda
-70f931d05360444db16f16db9d5d4bde *data/venice90.rda
+5860454aa3f06e743f0a126d4bbe7d9c *data/ugss.rda
+cde476ebb7ed10e5096e608fc819c697 *data/venice.rda
+34655ef9a62c04a076581197337217ad *data/venice90.rda
e990ca4deea25b60febd2d315a6a9ec4 *data/waitakere.txt.bz2
-be09fbf98efe72fdcb84735763d37352 *data/wffc.indiv.rda
-e76a86753610c12b5fa243b067b59ba1 *data/wffc.nc.rda
-a9a2b76507470c917a481f6f4bfe2862 *data/wffc.rda
-57c7609bb51329e07f9f530817b95eca *data/wffc.teams.rda
-c6c0d32c7457735e3fede0d3688dfd2a *data/xs.nz.rda
+3d0cfa016c4497d13feb41d146b12a03 *data/wffc.indiv.rda
+00fde79b032634a43cb03bcd733c5b82 *data/wffc.nc.rda
+e657c5db8078ce26fb46d85e0586b711 *data/wffc.rda
+73992544fe6110710bf257d34bdc9be4 *data/wffc.teams.rda
+75d3578e31965889ed7aabaaac670a01 *data/xs.nz.rda
81f7f0844a196dc48e91870c4cfafc99 *demo/00Index
532aba4ad4cac611141491a5bb886236 *demo/binom2.or.R
a7db0d0c4cc964b01ddbe0cb74153304 *demo/cqo.R
@@ -145,13 +148,13 @@ d2c02ccaf4d548cc83b3148e55ff0fa3 *demo/lmsqreg.R
a3d2728927fc5a3090f8f4ae9af19e1a *demo/vgam.R
00eee385e1a5c716a6f37797c3b4bec5 *demo/zipoisson.R
45d6563f929e021db90f9c0289e6093e *inst/CITATION
-51437c0e17cd2de2d3548017336eb8b1 *inst/doc/categoricalVGAM.Rnw
-a3ca882ababc2b49df5a6d6ddda3d185 *inst/doc/categoricalVGAM.pdf
+b1a84a83b8fb788d31d509e17936b603 *inst/doc/categoricalVGAM.Rnw
+32e56802e5c4b20821e23c0edd0603a3 *inst/doc/categoricalVGAM.pdf
e4c5415e487f533b70695b17e40d97bc *inst/doc/categoricalVGAMbib.bib
-ae4c252ab1ff7ea5097b50925524c6c8 *man/AA.Aa.aa.Rd
-6e6488fe17bda74157417f38f7d63df1 *man/AB.Ab.aB.ab.Rd
-426224676fcf86a274ee40a1e897ff51 *man/AB.Ab.aB.ab2.Rd
-4d087454d28e88143204b8ae0a6e94a3 *man/ABO.Rd
+fbc2e1f379d2815d0dd19ab1b38ac031 *man/AA.Aa.aa.Rd
+4bab796e819b1a3dc50c612c79a9ed77 *man/AB.Ab.aB.ab.Rd
+af731838bfdeab2348710b41b09c8247 *man/AB.Ab.aB.ab2.Rd
+954a72710f0b6ae0a2a4013ba53134fb *man/ABO.Rd
e205077baf82273656dade8e39dfd0f0 *man/AICvlm.Rd
4c634c4ac3a9673b49e00a21a5edcac0 *man/Coef.Rd
42eae1271b8c7f35a723eec2221a21f2 *man/Coef.qrrvglm-class.Rd
@@ -159,394 +162,408 @@ b00890f6b16bb85829fcea8e429045b9 *man/Coef.qrrvglm.Rd
7750539b34da20b20c40be62371fbc68 *man/Coef.rrvglm-class.Rd
5bff76cdc1894e593aa8d69a6426b0b3 *man/Coef.rrvglm.Rd
02efc2828e76eac595695059463d1d47 *man/Coef.vlm.Rd
-323c95578027a70f32ccdee741eb7e00 *man/CommonVGAMffArguments.Rd
+9293e04f06a3076e2030005bd2f84a78 *man/CommonVGAMffArguments.Rd
4c84f8608e7e5a2a69fbb22198aadf95 *man/DeLury.Rd
-2243f6f66449d96a9c370d9cb118bc85 *man/G1G2G3.Rd
-8594694ec7498eb252846e5e98930532 *man/Inv.gaussian.Rd
-40f8887a9e6322c1bea8ce385468c991 *man/Links.Rd
-0204cf1e24403cbd66194f76dc3f1040 *man/MNSs.Rd
-86a807027a2ed716e89276800c8714be *man/Max.Rd
+5bb061aa2d95a580d67ffd29200de30c *man/G1G2G3.Rd
+f7bc9b5114ed94e014016aed05b8e7d3 *man/Inv.gaussian.Rd
+77388e0223539826ca69389d46f80550 *man/Links.Rd
+0a95f8292850ef5b0fcf516400864c84 *man/MNSs.Rd
+45c9ca6851177b813be07e2446614721 *man/Max.Rd
2e0f16626b262cb24ca839f7313e8fb9 *man/Opt.Rd
-a0c448aa48678a37e4fc983bb532d141 *man/Pareto.Rd
-a11e8355c8a19a851bf46809073b526a *man/Qvar.Rd
-0404984840078254ed64e04618bf56ca *man/Rcam.Rd
+f9fb54b978cba49b278630f9403dd73c *man/Pareto.Rd
+c361935c5582a73d817e33febeec862a *man/Qvar.Rd
+4273365f7ee730f68259e69fb65f7746 *man/Rcam.Rd
2db32b22773df2628c8dbc168636c9f0 *man/SurvS4-class.Rd
4f4e89cb6c8d7db676f3e5224d450271 *man/SurvS4.Rd
-56b6bf93ed5da4c3e8324758bfde36aa *man/Tol.Rd
-69e999f635cae6333515c98a09a8b7c0 *man/VGAM-package.Rd
-0ac2556ab681b59598ad2170e475f25a *man/acat.Rd
-21abefde36c66867cc91bab989cc28ff *man/alaplace3.Rd
-0faf4d7fdfb9526dec05f6ff87680b90 *man/alaplaceUC.Rd
-fc94162782c395640db18e1ff7c6ebb5 *man/amh.Rd
-df8c8413b03b440d0451f50d92321e0f *man/amhUC.Rd
-73bb3963d43fd465ff2dd6afdb5473d1 *man/amlbinomial.Rd
-bc2496ef5c112b9d663b1fc90a1c493b *man/amlexponential.Rd
-dc06ac869a484aa41dd301d11f5372f3 *man/amlnormal.Rd
-2c2e41401482c0d156dd568480888925 *man/amlpoisson.Rd
+1f34fdf36c631e984d2a9f28bf607b67 *man/Tol.Rd
+943c75146bb5ef05028dde4481884d32 *man/VGAM-package.Rd
+41de97f0bacb4bedc36a589af710ff99 *man/acat.Rd
+20dd8ec5a2dd956f2dbbdfa237a138ba *man/alaplace3.Rd
+670cc88c57c693ba72d1ee1fe69743b6 *man/alaplaceUC.Rd
+af06e5a2e0552a8ef63756f1c3bce00b *man/amh.Rd
+5e1012c84beb593f4558a9df064a3304 *man/amhUC.Rd
+f7baeef1c4920ff1040f87674cfe7909 *man/amlbinomial.Rd
+5bf31a5de5606026bb7d8ad5f3474552 *man/amlexponential.Rd
+16513a48808783de16394f9e0a381bbe *man/amlnormal.Rd
+6396ff5ca052ca79bee66e1e7d703395 *man/amlpoisson.Rd
ba175111a99a5dd998a544b32e3390d4 *man/auuc.Rd
-37adc3f8e2804c880143a06e475bfd81 *man/backPain.Rd
+e4b6fadd6f54fc3293c2d0016c7672c4 *man/backPain.Rd
34b5510370a46ab522a754c731a437be *man/benfUC.Rd
-103d6afe4d897881692170608c47e7a4 *man/benini.Rd
+c1483ea97ab8115ef70f90bc0984ac6d *man/benini.Rd
b3e26d0011014d3722b4ecb3675c4aea *man/beniniUC.Rd
-73192be7a4732b3e32cdc0edef65010e *man/beta.ab.Rd
-5af71e0de7839a5d7661cb20a5431f85 *man/betaII.Rd
+084de566e49c6576179252616603f88d *man/beta.ab.Rd
+35e3e02fe0995db0290ca31c4ac5d7b4 *man/betaII.Rd
41820caae54231fdfe4f43c64c8b2aa6 *man/betabinomUC.Rd
-1600b3f2a75c6a60546d1d01523b1b98 *man/betabinomial.Rd
-0258e72615475b5afbae20655f7d60f7 *man/betabinomial.ab.Rd
-4f1141b7ef59dcfb3c52d96cb41e44df *man/betaff.Rd
+2e338ffe0772901aca870d11acb5e072 *man/betabinomial.Rd
+55283e8cce35112fb0c664219b92b6a2 *man/betabinomial.ab.Rd
+be38265c59ae5f15c757009310e14a92 *man/betaff.Rd
da3fdbf88efd6225c08377a461e45c50 *man/betageomUC.Rd
-8c75be04378f771189e287d9ec77ee71 *man/betageometric.Rd
-f2729cad5024784c73e0d9fa6aaef394 *man/betanormUC.Rd
-9065dcf96fd6b05e60189a5d5a5ee551 *man/betaprime.Rd
+30933e446c25b25f33b59d50f596d6c9 *man/betageometric.Rd
+aa6ee6bd6c48de8d03f18a80b836edae *man/betanormUC.Rd
+f568faafa4b67d1f0bf9ba07ddc4a7f3 *man/betaprime.Rd
7adaeed3dae23da1a0cc5eb9358d4597 *man/bilogis4UC.Rd
-992e6e71ae8c5a12ef3664da492829bc *man/bilogistic4.Rd
-c1fe467f3523193935adfd6b8e3ead1a *man/binom2.or.Rd
+b81f6ad16bb834d3fde123062ba31ec8 *man/bilogistic4.Rd
+929e542ce0d1937818bbc7a28c595927 *man/binom2.or.Rd
048aeadf836fe881f654f34004ae7040 *man/binom2.orUC.Rd
-bb62a8e00f036e4c1ffd7b6c24793d78 *man/binom2.rho.Rd
-0a679878123b41e3eb8f7ec074c83dd9 *man/binom2.rhoUC.Rd
-4863f87dee822d43731cb82da063c443 *man/binomialff.Rd
-461ddeea757c9690113126296c2fac55 *man/binormal.Rd
+27716f59421fefe451a8dee31527d1fa *man/binom2.rho.Rd
+34a781218843e7b670c6192867ea40e9 *man/binom2.rhoUC.Rd
+023dfaa228619f7cefbb20566c36433b *man/binomialff.Rd
+7e87b855d981532ef91977c44baa59e4 *man/binormal.Rd
bdad9ecfb116c4f30f930bcaf7208735 *man/biplot-methods.Rd
-3de6128c31694785566e9212b2f63999 *man/bisa.Rd
-903c040af10a99cda997fc5a11402bfa *man/bisaUC.Rd
+84a98434cb39c14a367de2215e72c22b *man/bisa.Rd
+8dc011224820b9c25d52ac088d6c330d *man/bisaUC.Rd
1190d249811d1a2d7dc952f8af02e90a *man/bivgamma.mckay.Rd
342d3d5c9931bc7327dc44d346c402f6 *man/bmi.nz.Rd
-ca0505aeb6143228b5ce142954ed3ba7 *man/borel.tanner.Rd
-adc7dfd546ab8430e0806c3b965c4366 *man/bortUC.Rd
-d0f5ac12609fb094d86da4a90af85508 *man/brat.Rd
+df2a69a92e00c0433cc8f83ad970c89b *man/borel.tanner.Rd
+4e692566eefaedf275e8693ea2f6efbe *man/bortUC.Rd
+7bc3641f9f81a4eb77a304103e5f1dcc *man/brat.Rd
0eaf999500ce9554156f37acbfe1e01a *man/bratUC.Rd
-124bbd982a378dca2151fcc854a07dfa *man/bratt.Rd
+b4c37774de88cd2f3f8f5e89ced2b491 *man/bratt.Rd
f640961a0c1a206ce052a54bb7b4ca34 *man/calibrate-methods.Rd
702754aad58a33aba1594bc7d2d45acf *man/calibrate.Rd
6cc85adda04a13e2ef01e0da265b67fd *man/calibrate.qrrvglm.Rd
7bc25736ab5e60ead3c3bb6a34e34aa2 *man/calibrate.qrrvglm.control.Rd
7308576228b41ce02ac3b9f61c8f9f6e *man/cao.Rd
f15b81668cd82879e8f00897fb30eea9 *man/cao.control.Rd
-d42538f50f7b5ce49b81b59403485955 *man/cardUC.Rd
-8a2a5e9dfece6f88bc99a4c36cf59457 *man/cardioid.Rd
-1981e97b7ba95bd8f97053e46044053f *man/cauchit.Rd
-e7b9c33bacc1d02d937453ab6ef7234a *man/cauchy.Rd
+e4b532eb5880648443b6fc60b31fbc36 *man/cardUC.Rd
+6ce12b5487a1650d3289522fbb73e0c2 *man/cardioid.Rd
+288036a65bb6f386d29a99dd40e91a32 *man/cauchit.Rd
+81d694e2aea915b2d8ed6c406f517baa *man/cauchy.Rd
2ab80616c05e7aebdcf769c35316eab1 *man/ccoef-methods.Rd
-8805fcc3975bce184bc92154da60bc6e *man/ccoef.Rd
-fd0d4488ddb3aa386bf1ed76f759450b *man/cdf.lmscreg.Rd
-736c151641c47418c5641e4b50f72326 *man/cennormal1.Rd
-92e4f610ab29c8a3ce3d23e08e5be934 *man/cenpoisson.Rd
-f6c605b4eed73b77cd5a3d90098632be *man/cgo.Rd
-42cc5374d9f2d1fa077cabf5cb18cea2 *man/cgumbel.Rd
+35499ce13b26395bc61c5931d202cf24 *man/ccoef.Rd
+5985b55cbfe98a8a7d2b4de3fe3265bf *man/cdf.lmscreg.Rd
+bd25f55e6466226cb79f74482f793a3f *man/cennormal1.Rd
+15ae61dc3c4394f9c3d0dd89c2d337b0 *man/cenpoisson.Rd
+a443fafdb223e2fa87d3766ea31d3fd8 *man/cgo.Rd
+3780e11c1ea1d54dcf57137fe1179390 *man/cgumbel.Rd
8b1f242c28ecc87b8f3850ee789a144e *man/chest.nz.Rd
-fc640335c7cd7df304a7396820bd46c0 *man/chinese.nz.Rd
-92b1bbec2b9554215c23402cbd03ca04 *man/chisq.Rd
+488c3d97209a21d15ee76e547f3a7d99 *man/chinese.nz.Rd
+d58b97e7b28882f689a67019139cef86 *man/chisq.Rd
8ecbb478efcf4b0184a994182b5b2b94 *man/clo.Rd
-1e216ef8b7c72364a0e8d5d28a190fd2 *man/cloglog.Rd
+2ebe24734ed0652482c35da374b660db *man/cloglog.Rd
1aa6ee888bb532eef1f232c9f6a02b5d *man/coalminers.Rd
-c34d8e18e49ac22df6e9e9e0d59ca2a1 *man/constraints.Rd
-8d5b5435cea0a91ffdadc459fa8f7905 *man/cqo.Rd
-4b6e07b4fe4a71094c99e824f5b3cd91 *man/crashes.Rd
-3c35c47bd05e52f2b596563f05379cd0 *man/cratio.Rd
+5fdafee68a84d78df4a63faf2ad313a7 *man/constraints.Rd
+5d2914e0a13b6c6eb815e8286c5f36b9 *man/cqo.Rd
+30051aefddc0470b8a4ed3089f07cc68 *man/crashes.Rd
+7633b255b36ed442cd8fbcb4e86f2f0e *man/cratio.Rd
6fb9db2b54b6b351d5fa6ee4c1e0334e *man/crime.us.Rd
-301fe0cc28a36f05fa5a2b5895f0fa20 *man/cumulative.Rd
-03a50f7a29344538e0d0a64de82d8b46 *man/dagum.Rd
+5c9d818d5d737e1ed673bed73e32d356 *man/cumulative.Rd
+95759e81b76b715b322489284d72cbcd *man/dagum.Rd
69387a098ea4f01d352f9b3faafbd504 *man/dagumUC.Rd
-1f1a2e048bcc0061b8aa5f0d7fcb600b *man/dcennormal1.Rd
+fab5adfeb805c5aa673ed7377f4fd78e *man/dcennormal1.Rd
b2a696abb80c47fa0497c245c180ba13 *man/deplot.lmscreg.Rd
7f57d255543bc7d13dadf322805c99c0 *man/depvar.Rd
-40a6d820457d0015ca60fe3a752ca80d *man/dexpbinomial.Rd
-577b7f18bc996c2d977201415ecd56f1 *man/df.residual.Rd
-1bfcb86a014b0b758f50d132bd885679 *man/dirichlet.Rd
-47abfbb23c120dd2611c990f1a82b72f *man/dirmul.old.Rd
-56435343450179e964797e28af0437e6 *man/dirmultinomial.Rd
-f2e9b9b0c0aeb41d83fa5e689076fa91 *man/eexpUC.Rd
+c4b52569e78545a35752e1368c2c16df *man/dexpbinomial.Rd
+6c6f8430f3c65c7ba3ce883eb2c9ad7f *man/df.residual.Rd
+d21eb844e77835fb1d6ae46a2b112a97 *man/dirichlet.Rd
+825897c6d06a47e9ac809bd2251cdb68 *man/dirmul.old.Rd
+77a420a5a6ec80e1af4ed8074d516766 *man/dirmultinomial.Rd
+f22567676718224898e78ee587bfaf7a *man/eexpUC.Rd
fe902b6457a11d51c758018d9dad7682 *man/enormUC.Rd
-2ad791294f4220bacdd9dc1e07fb2e94 *man/enzyme.Rd
-fb32261e27bdbbf3719163d4981742ba *man/erf.Rd
-7a52af5919ffbe4f6491df743fd54d28 *man/erlang.Rd
-016203ada813723df52817147e7da63a *man/eunifUC.Rd
-a755d061d59cc71b7aeb44e7b224976c *man/expexp.Rd
-8a3dffebc0871a56f7dc9f9f3bcfd60e *man/expexp1.Rd
-f8ea6ce8d6fd230e8dcb593d09b50140 *man/expgeometric.Rd
+7008f7c3d5c5cb178b2ef1d6d2aa8c27 *man/enzyme.Rd
+a29f442ce60d8ac8185738242b4f49ce *man/erf.Rd
+159ea23d4b4c5e3d473abf5c7f7db841 *man/erlang.Rd
+e3446627fdcccb65abbeff03a109b6aa *man/eunifUC.Rd
+233a9e25094ef11cfc7aa858f2cc9c15 *man/expexp.Rd
+f5c104469adfcf4d21cb4c8c525c0850 *man/expexp1.Rd
+391ec14ac5da161f67cb01f91bf474cd *man/expgeometric.Rd
bba52379a93d8f2e909b579215811554 *man/expgeometricUC.Rd
-33ac709e79e8cac15aa1e7eda4f74bd1 *man/explink.Rd
-0c5cc8525c38f3ffb7bc8f880fe04a7e *man/explogarithmic.Rd
+99739438b960428c5c03a25d654942e8 *man/explink.Rd
+2fbb7566f2c74baa4051e3ce849c1909 *man/explogarithmic.Rd
347d45279f0e72bc8c2dab25ace2f28c *man/explogarithmicUC.Rd
-5cda1f3c70b2f647037c1ee4302efd63 *man/exponential.Rd
-f2c84a09c854f679856eccd4f4430e61 *man/exppoisson.Rd
+ac3f81c0c335c8b74b12507e1398edc0 *man/exponential.Rd
+bbd414bfb50f4be140ac6b66b29694cd *man/exppoisson.Rd
8e5ff25491af9631e681241ed305bf94 *man/exppoissonUC.Rd
-737c92f56c01d46e0219fcba779987fc *man/felix.Rd
-842a3ba37b78b88f1e726338dc883d85 *man/felixUC.Rd
-e89421f88d21f4867aec746c47b5e804 *man/fff.Rd
-66f1c7e1e2f78f76ed1b5b7e7fa259bd *man/fgm.Rd
+2cb7a7ffba4a046d1205295d75d23a18 *man/felix.Rd
+0bfa97ff4d9eead46aa1a822e2c231c7 *man/felixUC.Rd
+77038da711286677c94066f9326b2a20 *man/fff.Rd
+60dc65a9677bfa00c99ccdc0bd2449d2 *man/fgm.Rd
0c4744ec66aa44b14f5c3dd2d79856a1 *man/fgmUC.Rd
0f91dd411c054004631a677eda63db79 *man/fill.Rd
b929e2ab670eb59700bc4a1db07bbbc0 *man/finney44.Rd
-2a71cba3122f180deefc7eac6fd9500f *man/fisherz.Rd
-72f9c0c153b97d8c9ca99772e65b0d6e *man/fisk.Rd
-8a4d96c331c9bd0f8a630a672f1cc2cd *man/fiskUC.Rd
-f50d6af678d60e23e1285f5d2c6255cc *man/fittedvlm.Rd
-f0dd850a571209fb251db51db2b3d9a7 *man/fnormUC.Rd
-619e4551f1f29af1cd2e80db5d5eb98c *man/fnormal1.Rd
-18c339da4093664d14febbcf02f3a2b6 *man/frank.Rd
-cdfcf8fb1eb1799a197dd90a5a245d9c *man/frankUC.Rd
-6f7745678b1aeec1b8dddea8db6f83b3 *man/frechet.Rd
+6c1e3ad4431df4a8f949ec87d523de03 *man/fisherz.Rd
+0cab527544d71e1909b24a4be8a11f69 *man/fisk.Rd
+e8265b669964f68bedc38035251bf595 *man/fiskUC.Rd
+9b60e6d859114ce0c7a47f87456dd656 *man/fittedvlm.Rd
+e3ffaf55fb9c925685d1259eedc4fd3b *man/fnormUC.Rd
+2d7d7f37e64c9ad1d896dcea590ee4fc *man/fnormal1.Rd
+80974c2814d703c1c1d4eab536f656a2 *man/frank.Rd
+e6d4221fd51756a2881065dfc303edef *man/frankUC.Rd
+b60b1268713121e14fadc654729842ab *man/frechet.Rd
2716982ec8d58016f0d08737aecd8843 *man/frechetUC.Rd
-a064b35aec006934e5667bdbbedd1b97 *man/freund61.Rd
-47db6280a78b01c89bc971cc1be5bb3a *man/fsqrt.Rd
-13cc0e1a0a95d020031deddecb4af563 *man/gamma1.Rd
-152972ee5cd8c6d903ea1faba8d2b207 *man/gamma2.Rd
-bc93b6e6e71256cee791e31125b0b1e7 *man/gamma2.ab.Rd
-cf2ba12145a4e1626df9585d8fc72987 *man/gammahyp.Rd
-66237ca3553faaf444f36b592a1cfc4b *man/garma.Rd
-dbdc01466b43ed8302f46b2a63da17bb *man/gaussianff.Rd
-72a33bfafdbb835024823d29b540e3b4 *man/genbetaII.Rd
-988ec82425b040c71e0bfee8dcef00dd *man/gengamma.Rd
+ef897e4618c5244c2a59dde719f011d2 *man/freund61.Rd
+2b392459d756beb1213250d266c90076 *man/fsqrt.Rd
+97b73c666866f4daa6e5be208fb7fee3 *man/gamma1.Rd
+0ae1b94f9b6384cb4084dfd3a04861a3 *man/gamma2.Rd
+c0e3957aaf1b96e0a35a2ea95c023fc3 *man/gamma2.ab.Rd
+4aeaf1f465f97afa3305a6ed9dcb049f *man/gammahyp.Rd
+40973d8617d8769e4cf70b17d9b19846 *man/garma.Rd
+446118938e1448f78ddf8ae797495d60 *man/gaussianff.Rd
+3f6f548d8e09f030cf675128e5926bfd *man/genbetaII.Rd
+ac349c9adadfadb8cc9a574409c22956 *man/gengamma.Rd
bd63e15c3ac9ad8a8213d4cdc8bb3440 *man/gengammaUC.Rd
-47fd021736f77a04595d5c12e7ad4842 *man/genpoisson.Rd
-f626c2b3188a5755dc93112aa3bcbcf5 *man/genrayleigh.Rd
+c572a5a90988743fd046d5332bef6497 *man/genpoisson.Rd
+b1c3656df6f641f918c4e5bbd4fb239f *man/genrayleigh.Rd
c31e093e7b6e5a4a7959ba6404b85a23 *man/genrayleighUC.Rd
-bdd0441747900e5421d0fadaa907ed8f *man/geometric.Rd
+cc6be93cb89e2eec6efd5ded2448285a *man/geometric.Rd
78b7d9455f1eaa4572ff54427d77935f *man/get.smart.Rd
14a7e2eca6a27884e1673bd908df11e1 *man/get.smart.prediction.Rd
-48676987a2581858d5b2992385d29134 *man/gev.Rd
-564d66518a6ec5d2a303e16814266d8c *man/gevUC.Rd
+c8382766873c747985f8b7fea99704db *man/gev.Rd
+e4c037fc281c8a6948962264493baf94 *man/gevUC.Rd
690b69d50e92a781720cc547dd22c3b4 *man/gew.Rd
-b4acd939599553a8f5fe60461c1d1940 *man/golf.Rd
-70f0f28c69b1f390c67fb4bcce125da1 *man/gpd.Rd
-05ffba31706bba09ffb7a1d7a18e1a4e *man/gpdUC.Rd
+ee5c919188e3d8ad589ea8d98ddd3ad8 *man/golf.Rd
+5cc8c0cabb839b34f4f37de4b57f4428 *man/gompertz.Rd
+3affd7c0ae94702950fb738253059a68 *man/gompertzUC.Rd
+81d287969447618149d22113fa118d40 *man/gpd.Rd
+9c77b9e29e9364865bfd8bf0c7143437 *man/gpdUC.Rd
d262446f558ffbaba51cc8ff86e5ab1a *man/grain.us.Rd
-34ff9c06370afeb74babd58f0b8726bc *man/grc.Rd
-63d054be8dbae4bf35a7b9b6992627e5 *man/gumbel.Rd
-fce5cc2b341eb7e67c00f8c0d91ea287 *man/gumbelIbiv.Rd
-c3115a24f1bcd264b17912ed76c8fdb6 *man/gumbelUC.Rd
-d60aa16831b87c86aaa5648b6c4afc76 *man/guplot.Rd
+1daecbfc273e25de8e6811cb7803c788 *man/grc.Rd
+3ffdad5594e4eec6062097a5c7c974e7 *man/gumbel.Rd
+a6df41a1cc82c1744cad46ba89a5b161 *man/gumbelII.Rd
+2127127ee0e62bb2cefe05462bee7c39 *man/gumbelIIUC.Rd
+1f202bf7be31c71a9d9982b7ef477cc9 *man/gumbelIbiv.Rd
+977ee282217151a6c5b83867eab32573 *man/gumbelUC.Rd
+fc6b1658cbcb87054ab516552b6875f9 *man/guplot.Rd
c1a9370d3c80cd92d9510442da0ff940 *man/hatvalues.Rd
-00b132289191052ac14659de9ab936fc *man/hspider.Rd
-b5224b8a3e3ed7eae77129374e17c95c *man/huber.Rd
-bbd60b4a3ab257638df3ca1d0e99df63 *man/huberUC.Rd
+5914e78d3a007ed9338d2a94e07e9f36 *man/hormone.Rd
+57a5f4c37dd40a74161489df6759fcd4 *man/hspider.Rd
+769c424052e85555142f8c4551282fa0 *man/huber.Rd
+fe68021175fa4c20ade86f55db7b5443 *man/huberUC.Rd
bb9248061e4bcf80a1f239192629dd44 *man/hued.Rd
-7233194700e1afd90475317e4a23c831 *man/huggins91.Rd
-247e7d8e05b06904ee14cdee0c897d42 *man/huggins91UC.Rd
+dd719768426a76fe0d017f0b1975bdcb *man/huggins91.Rd
+80c6c747a9f873fa6e8e40565a0a9665 *man/huggins91UC.Rd
d44f3df87816b5cf0f1ef98315300456 *man/huie.Rd
3cb4fc1b3a7f1a6bcf7822219ac25525 *man/hunua.Rd
08383189cb05fe01a3c8a5fa2e2c78c5 *man/huse.Rd
-dcd7c3b73c0e9437f777ea65f25f23c3 *man/hyperg.Rd
-f134ace4dd0689809500d58933cff6dc *man/hypersecant.Rd
-8d18339270dbc32b70c105c3500eb412 *man/hzeta.Rd
-1c82e233c218a874edc3b00547d8ee1b *man/hzetaUC.Rd
-9c03dfc0921099fdae21e7e340ac3cc0 *man/iam.Rd
-7266e5dba641098cd882cb62a8e33244 *man/identity.Rd
-7736014b1a24efd32b9f35eda358fe5e *man/inv.gaussianff.Rd
-941470d5ff5e3a83089d1ec1af026f35 *man/invbinomial.Rd
-15700926fcf2de393742f4758736b2a3 *man/invlomax.Rd
+cd473192d2153433bee1530bce881972 *man/hyperg.Rd
+34ba5a500d1e9395c1e6761334232c0e *man/hypersecant.Rd
+63751a4f55b918aad163a53994a01a07 *man/hzeta.Rd
+c3ca61cb9f3d309e8a08dd528de7d994 *man/hzetaUC.Rd
+1e31e772997c2b18bc113d77e1e0e176 *man/iam.Rd
+f4dd596dc646925e2c68c9679c799472 *man/identity.Rd
+da60bdf0881d1c9ba435a6455fe214bf *man/inv.gaussianff.Rd
+085be4050d440ab7aa197da463927e20 *man/invbinomial.Rd
+00895467cdcea7ae66dfeccb3d84366c *man/invlomax.Rd
01cb2a27a9c0eae7d315f3ca158749f5 *man/invlomaxUC.Rd
-91301add8e408d69c13e469769c8370f *man/invparalogistic.Rd
-c0161485e2448b7abdfd3da5ab738c0e *man/invparalogisticUC.Rd
+8d0593ef6ef39c02009383bc4e5c2dfc *man/invparalogistic.Rd
+6a8c2453b40d2f3badd4d9c0bb67d907 *man/invparalogisticUC.Rd
+1bf97bf1064b8487d9b18f648a2249f0 *man/is.parallel.Rd
a286dd7874899803d31aa0a72aad64f2 *man/is.smart.Rd
-2e3e9b010e6c48ebc38720fe7a1d88fc *man/koenker.Rd
-8fe841741b94002d204ba682bde54c8a *man/koenkerUC.Rd
-27b379846522a3e3229ff6aefa3c6791 *man/kumar.Rd
-df631f857d415cdf7ae3a39e05a230ab *man/kumarUC.Rd
-b103b755b50474935e8da3874d923792 *man/lambertW.Rd
-59c33ab57e8c8cfbd29de2af8a14c8d6 *man/laplace.Rd
-b8f463b8e776f6a1f604bc5da92aca37 *man/laplaceUC.Rd
-ed0afe39738f1712b3981c3618c4f913 *man/leipnik.Rd
-2fd907a10ab430f8a2e2172bbe8cdec2 *man/lerch.Rd
-208f4d3827953d3195c4cedd95b9c95c *man/leukemia.Rd
-5c8c39fee1abf69282ea305ef6140a30 *man/levy.Rd
-24240b2f56289e1a3240dafd78d6212a *man/lgammaUC.Rd
-2b04a3472ef0fdcdea239dd3b3efa293 *man/lgammaff.Rd
-7fec5c64cf46a14b918a919590025ac6 *man/lino.Rd
-2ef824a6f01bef38ed0076a1015fae79 *man/linoUC.Rd
-c347f3d3752c3dcf7d9b614b3f62be6f *man/lirat.Rd
-4ae53304c7e161a7979e2dd08e74fd71 *man/lms.bcg.Rd
-98702304ab240fd2b82ba9a32911903e *man/lms.bcn.Rd
-ace0ac75d6a275e6814174949f40be92 *man/lms.yjn.Rd
-ee69ac28aaab7887c656b857af21ffd2 *man/logUC.Rd
-c362d03bf3e2c4c24f8e0f46af093a09 *man/logc.Rd
-f3d3ed74f201143d09a98f938b684c6a *man/loge.Rd
-e5c36efa7e692fd32de85fd9c4a347db *man/logff.Rd
-5b7b7b672758091d20d8ff0f358f2550 *man/logistic.Rd
-6f266ae1d6b63a114aa4b8ae6ead9ecd *man/logit.Rd
-1da3783f1662d799690fdd081f721ee0 *man/loglapUC.Rd
-3ffe1e60703b15f818cd7972cd8f44a9 *man/loglaplace.Rd
-8232a213dfc8899703f6e57664efae69 *man/loglinb2.Rd
-dcbd827fd3586f46fc4ca1a1495a9ea1 *man/loglinb3.Rd
-dd9c84ba9c07cc9414175b41d94fe1f0 *man/loglog.Rd
-ff85df21653d22ed4cbf3138f82049d8 *man/lognormal.Rd
-aad78245c7c13be5d22efbff8774adf8 *man/logoff.Rd
-0e3d32a8a20c59a5d7c7a4b1e9afb7bf *man/lomax.Rd
-1fa1bf8d11541be8d48de2ff954462b4 *man/lomaxUC.Rd
-138808d36f9fb37444e28e0d2c426dd1 *man/lqnorm.Rd
-f6ce6b9c84be7adf18b37a78ea6622b6 *man/lrtest.Rd
-8b21946b3c21a74d758e4b18117c0000 *man/lv.Rd
-528f457d3ec33f6264ccf05670fac457 *man/lvplot.Rd
-af30767e3ab7bfb0bc809409d7f39e84 *man/lvplot.qrrvglm.Rd
-15d57ef2c0a922cef23f2d25cda5c3cc *man/lvplot.rrvglm.Rd
-49c02a1e6bf68c88e2357f717d929ba5 *man/margeff.Rd
+b829d9d0aa0947644b415535a4ed5be7 *man/is.zero.Rd
+30a15dcaa326928e71982bc7306a79cf *man/koenker.Rd
+50dded53a59735a07217074d8228393f *man/koenkerUC.Rd
+0d9800aa2eb316c662b36593ac2c74a6 *man/kumar.Rd
+8756e8c50075f92aeede56aedff7d2c7 *man/kumarUC.Rd
+6f2f641c0cb15f24ec1777d2db159459 *man/lambertW.Rd
+0c7294d5f5b568a23c2634a86a07f62b *man/laplace.Rd
+7310aca7179d6f31d9e0da64944e8328 *man/laplaceUC.Rd
+f35539501667121c53abd0b1e448b150 *man/leipnik.Rd
+c93045a9f05888a4675ba3d48e70e7e7 *man/lerch.Rd
+8c7fca39c92e5f79391a7881a0f44026 *man/leukemia.Rd
+13b2cc3332ac9559d5d47790a8e206e1 *man/levy.Rd
+5a35593723af5ff2e544345d4e6b868b *man/lgammaUC.Rd
+42d40282918efa270ed17f4bd3eb86a6 *man/lgammaff.Rd
+fd33ebb21f7ab741392b8c15ec54f5e4 *man/lindUC.Rd
+7ca83cec8ecb2fd661ca66bba89dc411 *man/lindley.Rd
+59375533957aa583acf12b0b44b0d718 *man/lino.Rd
+9c786943dcad40f95f4dddd3ff0f37db *man/linoUC.Rd
+9a021048d7a9c594643d91d3d4b234cd *man/lirat.Rd
+fc9016da8aeb1d1bb210ef7274f9da3d *man/lms.bcg.Rd
+688d994bbe84b5ed2b1cc962037f2721 *man/lms.bcn.Rd
+6e2e5248c45084fbcb0090b86f7f3f46 *man/lms.yjn.Rd
+0d35403673c679344da32f978a2331b2 *man/logUC.Rd
+f0502f0505925ca9d48e6e3994f278a0 *man/logc.Rd
+8e5086b9f1709bb02e1ea438d6c88297 *man/loge.Rd
+2be2b998e9b4d3d32e72f2c9e0662273 *man/logff.Rd
+14c728f5bfd8968fc74390f1cb95dc44 *man/logistic.Rd
+74e267e8cbc018f13583babaa3ab73cf *man/logit.Rd
+1f63716471926cf3baae3150c94beb74 *man/loglapUC.Rd
+a570e779c1f0741c4196a0982fdeddb1 *man/loglaplace.Rd
+43012be50bf4ad3610f50a3609f80b20 *man/loglinb2.Rd
+54e34264cb73f9d54c4c412af81c17fe *man/loglinb3.Rd
+f1c11784dff391acf166a8986d434354 *man/loglog.Rd
+4c6053656b2fe0276fbe1a99b0174238 *man/lognormal.Rd
+e859c980e26eb3e483d0f3648b502d13 *man/logoff.Rd
+929d46b782f13e591d4989724343cbde *man/lomax.Rd
+06ca5cde9d161d2320f87f6b2fc04aa1 *man/lomaxUC.Rd
+950443559c152cc441b4b08dd5c7e12e *man/lqnorm.Rd
+3f48084e64cd4663677fc8df8e4ecf3d *man/lrtest.Rd
+49f8def752351e1f34beefea82985ca4 *man/lv.Rd
+c066460c787fa701788c400e56edbf80 *man/lvplot.Rd
+f909e728550a7e0e95f17ec7d12d0a85 *man/lvplot.qrrvglm.Rd
+30f7cce914cf36078392189f12c0670e *man/lvplot.rrvglm.Rd
+9aae7ea097d087c0acfee0b7358a997e *man/makeham.Rd
+f459ac6b3f9453e0fb6cf4dfce393b64 *man/makehamUC.Rd
+a836cdea396e90233979a1065e9aa401 *man/margeff.Rd
b5c6a5a36ebe07a60b152387e8096d9a *man/marital.nz.Rd
-f08033557088369199e94547b1740580 *man/maxwell.Rd
-3fa2c9ebae9651becc102930b49d03ca *man/maxwellUC.Rd
-10df4196cca726f8787c0c5f5656e3d0 *man/mbinomial.Rd
-7691a2cfdeb641439b0cb86959d6632f *man/mccullagh89.Rd
+eae8c8d703abffa56be56cc88743822c *man/maxwell.Rd
+e01c8beb637aca15dd5aaee412b5c3ea *man/maxwellUC.Rd
+ad6f24fe862c9936ea99033ba89d4fcf *man/mbinomial.Rd
+d0ba1cb515890aa57df222840a8ba7d4 *man/mccullagh89.Rd
4d8d0f37dc8249d00e52283764534e98 *man/meplot.Rd
-3660487df3e8da3023fa94195c717e06 *man/micmen.Rd
-9a192c889be24f7bdd6176f9aca6744a *man/mix2exp.Rd
-032b58b8746fb0d18ed355acd28afa7f *man/mix2normal1.Rd
-4aaae69710cd08f08bb7ce432cf2108d *man/mix2poisson.Rd
-1d7e090a54f5524e6fe0711bb942be47 *man/model.framevlm.Rd
-1ba41606eeea0ea3cd41bfc2098cc35d *man/model.matrixvlm.Rd
-febba2e46a2084aff84e8c76a388e400 *man/moffset.Rd
-dde2999ddb57cc4af821b2d2e2b65251 *man/morgenstern.Rd
-056cc7964ecd77586d22a375ad879322 *man/multinomial.Rd
-29ce3642cdb940b4bdbba7f6173a6a60 *man/nakagami.Rd
-d87f98ccf030b9925fa27475890cd27e *man/nakagamiUC.Rd
-38c45f8d05c910a957456dcb22c2cd4f *man/nbcanlink.Rd
-7a211d0cb765afa12ae6579af7d867d5 *man/nbolf.Rd
-285532c1c7ad5b17bc7ad287bef549d8 *man/negbinomial.Rd
-4511975c94fcfbe834ba7ca3e457c98d *man/negbinomial.size.Rd
-4c8b84458e8ee97cf8ec3189da73a78d *man/normal1.Rd
-6df574ccfad885dcffa172e12a14904b *man/notdocumentedyet.Rd
+3b5d203389f18b3847122d3a78152f21 *man/micmen.Rd
+49ed6c8e6d160b323f1f2acd75d5daec *man/mix2exp.Rd
+2a272b10b746642a9ee5bbc6cbfc9511 *man/mix2normal1.Rd
+908970d91303cee973dba82825fabd4b *man/mix2poisson.Rd
+815499481774f0be63eda5da52650954 *man/mlogit.Rd
+e41c539196b04b87d33595a73efef01d *man/model.framevlm.Rd
+73bc45aa0257f78953611c9fb6daba39 *man/model.matrixvlm.Rd
+85d73b769924c10e91065f87bf237fb7 *man/moffset.Rd
+7184b188c705a6e326e454f859e76f1b *man/morgenstern.Rd
+a7808eda65b29d6da616e5ecaf83b063 *man/multinomial.Rd
+0ef36351d132ce1f91580c5f71237f39 *man/nakagami.Rd
+c69bfdd1afbf8ea05b2d37c27f2b097b *man/nakagamiUC.Rd
+f18d4e0e5edbaf7f33417e87e8b9317c *man/nbcanlink.Rd
+effbf6636c9e903cc25b4428e7bc3b60 *man/nbolf.Rd
+7a99bf77f55ae58fa6036072c2685258 *man/negbinomial.Rd
+ca68c753f150159fdf7c91f53e800b4d *man/negbinomial.size.Rd
+e0b6546fb8d6bb8a5e2506dc89819178 *man/normal1.Rd
+2e6a59a3d8e48a34dd04a2fda189a23e *man/notdocumentedyet.Rd
8a118515f4955e425adcd83f7da456ec *man/olympic.Rd
-1ca5bd6a9ee667125ba379e48e66c99e *man/ordpoisson.Rd
-9ecbe9ab6cc7d40f41f10a71fdae5996 *man/oxtemp.Rd
-ae5c6514e182459fe0d59771b49246c3 *man/paralogistic.Rd
-e82353ff6171e11bbeae4e3687bca231 *man/paralogisticUC.Rd
-97dc353975a803fd33bebd083c85713d *man/pareto1.Rd
-3c9ba189fa4f71114f3aa7248c169951 *man/paretoIV.Rd
-b89db00a67be3a3aaa3095f3174e831d *man/paretoIVUC.Rd
-66f9463188664956ca69d58bd11a0e51 *man/persp.qrrvglm.Rd
-53a43e65f00420564ad783888f356ff7 *man/plackUC.Rd
-c542d660e94860e165d2945a855eae24 *man/plackett.Rd
-49808aa704ee72fb230c99b656d48d0b *man/plotdeplot.lmscreg.Rd
-768d300d2a478398c5a77549922caa97 *man/plotqrrvglm.Rd
-9653f109e0c0c5191306070e0f2b8ac9 *man/plotqtplot.lmscreg.Rd
-9ae405fd77c85cab2a55f92664b1cc67 *man/plotrcam0.Rd
+0c48bfcd8e3d21e919b3c0f55fd4d8e2 *man/ordpoisson.Rd
+c0074d4c77ded24e50fd3fe3668a4011 *man/oxtemp.Rd
+21cf3000f5edd5c31cf53cb0c9ae0f7c *man/paralogistic.Rd
+7aa703a30747006ad5b2628fd5e593da *man/paralogisticUC.Rd
+2b2712df539d0a0738ac618669767905 *man/pareto1.Rd
+26cbc4f613bbbd8c907337e3b203ae07 *man/paretoIV.Rd
+4874d4d01dff5685441b03e23b02746c *man/paretoIVUC.Rd
+79f129c66a04bef03318c6efe6d6aaea *man/perks.Rd
+a489c7a05dccb46a72f15269c8bdb5ad *man/perksUC.Rd
+bb672a5c452bbe1a01fe06657dccb7d4 *man/persp.qrrvglm.Rd
+b6d928375ee9738785be7ec7fa66d277 *man/plackUC.Rd
+1312b1dda42c2f696a2824e2bd0e2ad0 *man/plackett.Rd
+791d04a5c3a3bc514bf0ed1fc639f8ab *man/plotdeplot.lmscreg.Rd
+e6eaf56a6f7b23ede6cbd92dbce502ed *man/plotqrrvglm.Rd
+958dcd119ee66e5d5318c4cf19f024f8 *man/plotqtplot.lmscreg.Rd
+45ee1e3b4fe0a2577f5ea8732f1db0f8 *man/plotrcim0.Rd
db9c5b2ca7fd4417d4d88d02317acebb *man/plotvgam.Rd
72bade4a008240a55ae5a8e5298e30b8 *man/plotvgam.control.Rd
aa55e676b3fd0fab0f1aee26ab9fa6de *man/pneumo.Rd
-de61bd1899e2bd101d3977d2e25f163f *man/poissonff.Rd
-aea0d6dabf75a88fc5bbf4cf77fef7ec *man/poissonp.Rd
-8abbf4f53f755542e7197830d026f514 *man/polf.Rd
-a2fb4efb4037aaa2362579d73e78defa *man/polonoUC.Rd
-2d239f593b34e2342faaf3ba2e8f55c2 *man/posbinomUC.Rd
-67c1153ac99b572401e73d68f665b2ab *man/posbinomial.Rd
+1cb05da296ec9389de210df4d27e71c9 *man/poissonff.Rd
+dab0255f3b6f88ca8362af2570311a2e *man/poissonp.Rd
+ed23d712bc7ffe5a7f70481774e1e827 *man/polf.Rd
+2b1a116706ced6399a4248853e001d89 *man/polonoUC.Rd
+f8d8123a109be7db427120a4b67513e3 *man/posbinomUC.Rd
+a6c09d4d735df69c71432b9b801216e8 *man/posbinomial.Rd
6ec345e5d20c36bdde7b7d09c9b71893 *man/posgeomUC.Rd
-a5f4a74e36b56b1b6799650c38a95f22 *man/posnegbinUC.Rd
-ccfe5f42d992cf7aa5f5309dade4aaf5 *man/posnegbinomial.Rd
-0e2ea2f46537b34ccc6603fe56303983 *man/posnormUC.Rd
-c4f9abd34a4cd9ea5b8a6fc3b88abd83 *man/posnormal1.Rd
+d14c926ed9841f43e6ace38ca9a7529f *man/posnegbinUC.Rd
+ac1f3ebc8db196c11356963d4f82d509 *man/posnegbinomial.Rd
+4d39085d9df2a816cce2efdc10af0825 *man/posnormUC.Rd
+7b1ca086982454d5cedb01496c8c8cdd *man/posnormal1.Rd
bfa5a34fbeeca1ee107e2fc332f1ec1a *man/pospoisUC.Rd
-6cde192a6dbad131523057890c565ab2 *man/pospoisson.Rd
-95386d432e396127192e5516a35059cd *man/powl.Rd
+c33e0546ca2429e1a4bcb9a56ef992e7 *man/pospoisson.Rd
+2fdf20b0d607f422c2b01ea15f271652 *man/powl.Rd
f5ca83cbbe57ce6a7e98a0318ddc6aac *man/predictqrrvglm.Rd
-10003ea86273bd156fdbd6990c5f80d5 *man/predictvglm.Rd
-d2b5e03b84a6c8b6ba9553155445c694 *man/prentice74.Rd
-1de751c9f36f6a6d826458e0006acf36 *man/probit.Rd
-e7a5908f988925eed1f176d91086b578 *man/propodds.Rd
-dc7a643eba4c2ac7bbd842ed27eb1023 *man/prplot.Rd
+ee617c9486f9db20894440ae145b1cf9 *man/predictvglm.Rd
+f1cf2e37dcc09fba04770ecb055cf646 *man/prentice74.Rd
+f26232b73e5f0c2f323d019ba9e46ada *man/probit.Rd
+811cfe4a15b3b140c48d930e3172a195 *man/propodds.Rd
+ccdfc3f7df34475385a243eae0ab5877 *man/prplot.Rd
de570e252375d7052edaa7fb175f67eb *man/put.smart.Rd
-602637ecc0fab44f08f45caab838f1fb *man/qrrvglm.control.Rd
-e5ac6fc23dfa77497bbfe05831e5ea33 *man/qtplot.gumbel.Rd
-0636a2c78899c1eea2111afcb48617d9 *man/qtplot.lmscreg.Rd
-64dceb3461595b09595b483f72ac8b42 *man/quasibinomialff.Rd
-85d05c50101b02eb35a1e31d75226c05 *man/quasipoissonff.Rd
-013fb5594d2df84c9fc9aad2dd822070 *man/rayleigh.Rd
-eeb74d98864573758cfe36ba13ef6ef1 *man/rayleighUC.Rd
-03fb6a7f9cfc570ad5fd1bc59accc905 *man/rcqo.Rd
-215e0a6f6611334b2b9ed8a35595227b *man/rdiric.Rd
-ac9770dd82570248526fcc6fc5736e9a *man/recexp1.Rd
-b482b0fef9752983d7f39154c006d7d2 *man/reciprocal.Rd
-5801e4142fa8b2f82ee50cbbf51d6955 *man/recnormal1.Rd
-38b36bb14ee58c0f6441f59700842cf8 *man/rhobit.Rd
-c4d52486cd29a6b05426ece0496dbf0c *man/riceUC.Rd
-a80124978dea921b2b0f8f5ac7187bf2 *man/riceff.Rd
-211f962003276a0a032c94b847bfc426 *man/rig.Rd
-28a7ee11dedcd60712d830cc36f8c208 *man/rlplot.egev.Rd
-fdf98b1b6024d9702c1ad361d87169fa *man/rrar.Rd
+438d995cac8b7eae527bf97188e97f92 *man/qrrvglm.control.Rd
+ddfc6463c5266b7dd79c7a7e9d3f8f6c *man/qtplot.gumbel.Rd
+7894f8d45225244008021bd30565ea32 *man/qtplot.lmscreg.Rd
+eb986116765a0a7229e0988a343f1b6b *man/quasibinomialff.Rd
+c2efda0141a3df852b775baa18af0c7a *man/quasipoissonff.Rd
+67da92796b1e1d1f8866fee2c8cf4954 *man/rayleigh.Rd
+02bfbc64253593edfa891a19f33acd89 *man/rayleighUC.Rd
+c3854f1526ca08f961489ef371183939 *man/rcqo.Rd
+1d9601bd76b8c0cddcf567b144b5ef89 *man/rdiric.Rd
+385bd032acb1f2925c49a7748dcb8631 *man/recexp1.Rd
+2af6888fb0758a9fdaf45fc72f844724 *man/reciprocal.Rd
+d3f671ea06066c9bee61317ace112d66 *man/recnormal1.Rd
+9389504a7c7716cb9b183322290b504e *man/rhobit.Rd
+b70c93ab6124de167a4ccab2f8fc2221 *man/riceUC.Rd
+7471692a618c57fe5f5137deadaef4f7 *man/riceff.Rd
+5cfc734589e404f286ce8cda342344bd *man/rig.Rd
+258a5e119f601399b04d4dc51ce2e4ef *man/rlplot.egev.Rd
+2b81fff5b22c9f72773a61d5e39a8747 *man/rrar.Rd
ed93c6e06d519ab3ddb92c73cf62bb67 *man/rrvglm-class.Rd
-6a69f5dc095de3eb11b473db1f52d481 *man/rrvglm.Rd
-a5a699bccdf3768b9bc425b410d4328a *man/rrvglm.control.Rd
-aacdffc764ae399ea515751128ff32fb *man/rrvglm.optim.control.Rd
-b5936590eb374807b15d3d6f10257496 *man/ruge.Rd
-1aa08eedd0a60614932bf6916e600e9d *man/s.Rd
-225c2d43e9c5143d0e6e0fab79a22439 *man/seq2binomial.Rd
+9a90884892c72a0d48bd33ea0a13e4ce *man/rrvglm.Rd
+b104826904e5b6dfd293fb60aaa4dccf *man/rrvglm.control.Rd
+493070deddef6815cdd2de211f3a65db *man/rrvglm.optim.control.Rd
+ecc44804896b8f3d4a9d469a952fe9a6 *man/ruge.Rd
+b8b40b0a50bc2cf97bfc45b4b250a7a4 *man/s.Rd
+49804a5ab4ef29fd6b394b9fee5b18ac *man/seq2binomial.Rd
71367fe3b494a45c98f9a96e1fd791e0 *man/setup.smart.Rd
-fa349f195a44efe47ba19726c6d96725 *man/simplex.Rd
-0b224135695156ba53178b78ba64690d *man/simplexUC.Rd
-407c6118fc59774474e3a15832de6c49 *man/sinmad.Rd
-d406cb5ce0d23612220d9011346b96e0 *man/sinmadUC.Rd
-6e0c8526ef9dc5b8088eacec6d611448 *man/skellam.Rd
-3b158a36468b4e9cb6ac33c6ecb7e59a *man/skellamUC.Rd
-878eb152f75438a8c6d55ae6f56f938e *man/skewnormal1.Rd
-9aef1e982d65a1ae4b5a993a54b60f7e *man/slash.Rd
-0fbb31668407aa16241824e2c17339a7 *man/slashUC.Rd
+22fd8f8f7a559acaecfbca2c6dbe5818 *man/simplex.Rd
+7cdf80a6cdb171d1f6f9ae200422b159 *man/simplexUC.Rd
+198cfe54eeb201c3e5de6c16c14afcaa *man/sinmad.Rd
+077ac803be0b8fe390a59faa5a32523d *man/sinmadUC.Rd
+8555a29368f14ba2a2ead5344f4ae716 *man/skellam.Rd
+4cdec195b127858706897733934dffc4 *man/skellamUC.Rd
+094fd596b913d88f9941bb26396d4b72 *man/skewnormal1.Rd
+0c30d059794a31ec06e43da1590496cc *man/slash.Rd
+9d45778b7f284934351777b4b9686c50 *man/slashUC.Rd
1ed10e28c013e2e08ac5f053b2454714 *man/smart.expression.Rd
163cdb3e4a225aceee82e2d19488d56e *man/smart.mode.is.Rd
2b68a9e20182e8892bb7be344e58e997 *man/smartpred.Rd
-bd869816cc0a7a1af02285c8ff7b6fbc *man/snormUC.Rd
-fc8592ac8305dddbed31b11be3b532b4 *man/sratio.Rd
-6842d2562b09bd347aeb9e7cdb55f11e *man/studentt.Rd
-5585a51bdfb69f8366df3eb46b950885 *man/tikuv.Rd
-da0473cfe60820a64e74d4e2d7492927 *man/tikuvUC.Rd
-f11402d98706790ede99940cb03aaccd *man/tobit.Rd
-dec960a58993b1941f7f0507673a951b *man/tobitUC.Rd
-7b79a4a3bbe4fcd9fa6ecfa66fa98ec8 *man/toxop.Rd
-1e9fb945744309465b729dceaf2b9e47 *man/tparetoUC.Rd
-d656850a7fba6056bfcaf07a00510110 *man/triangle.Rd
-8c327c816d9d56403d617a32fa704e9d *man/triangleUC.Rd
-8fb0fbd98a56b1afced6cdceabea5c34 *man/trplot.Rd
-5cab3d39bc52ba50848cdfcf64199d4c *man/trplot.qrrvglm.Rd
+6efb329ba91500aa45ba2f3706e1f331 *man/snormUC.Rd
+3849f780d823a1a0aa67bb65ac35510e *man/sratio.Rd
+9b172b6ef80fc2e1b5b00b3a0aa1dce7 *man/studentt.Rd
+ed3bff9c47db0c26084efc1a74454f2d *man/tikuv.Rd
+d6c0077cad16ec5218cf5ca71898105a *man/tikuvUC.Rd
+076bb1dac7293c1de7f2ecd9f5f5fec5 *man/tobit.Rd
+95db69c0da2ceff7fcb86d6893a861c9 *man/tobitUC.Rd
+f5ad31498c55094320a6c5f8632a3ff6 *man/toxop.Rd
+d4859684f7ab3f490a5f7279c5a1bf0b *man/tparetoUC.Rd
+39423c1ea32c5ba0d4286b815ad2712d *man/triangle.Rd
+a262cd49e16acd6fb583cb2aa0fc5a94 *man/triangleUC.Rd
+304a7f28494e6f4a3f6e6bb42d02671f *man/trplot.Rd
+d7e22cc248287250fe6308ffdfc9e0ef *man/trplot.qrrvglm.Rd
50ada9ecd189456ce9f218d22b49089c *man/ucberk.Rd
1fc91e082e70251f46af4261f7d48f78 *man/ugss.Rd
-e9c44e172adbcba6a3818e74b180d343 *man/undocumented-methods.Rd
-8d8835dd870d94aafa3259ecd2568337 *man/uqo.Rd
+ff424ad63653087fd40315ae0763f0a7 *man/undocumented-methods.Rd
+1dc06807944c2ece254ebbcd034a12a5 *man/uqo.Rd
f9eeeaeacdb82471c5230468b61d7bdd *man/uqo.control.Rd
-986f3ae218b563bae795b67131082609 *man/venice.Rd
-609b06037613c666ba82ef99fe67b97f *man/vgam-class.Rd
-6b001b0875c0a2b48f0bb61c683acdcf *man/vgam.Rd
-1d53ebf6fecfac1f339841ef9b3e8dac *man/vgam.control.Rd
-b2bdeb9d2a6e9c2e7b8964d334b4378e *man/vglm-class.Rd
-fc5b02dd911753d18db07a25d7da3352 *man/vglm.Rd
-3332b24703a86d05ce7f4f17417b6e15 *man/vglm.control.Rd
-d7e7f317461e888a57ee1082db178328 *man/vglmff-class.Rd
-e12f38d6fc651548bc7badbbee4b6d49 *man/vonmises.Rd
-060df7afe140d1ef3b498e1492a9c1bb *man/vsmooth.spline.Rd
-969885cabc2f70c78def5cef9621a648 *man/waitakere.Rd
-0a974f438d1c92859d87f28896768b29 *man/wald.Rd
-8b94fe25920b5a05d4030b30f679176a *man/weibull.Rd
-9e552190553e5c08cc22b518d808fb9e *man/weightsvglm.Rd
-f8652276dedb724f7baf7234f37ad2cc *man/wffc.P2star.Rd
-f188fe990a99ec6a88e15e3ae69f1b01 *man/wffc.Rd
-ae1ea0d10cfc8cbdee70a460c590c823 *man/wffc.indiv.Rd
+f78da1e2ac9068f2781922657705b723 *man/venice.Rd
+5d0f6c9e067bd6e7d44891427c0b47ff *man/vgam-class.Rd
+d3dec49d63432c4e702ab28d994663c1 *man/vgam.Rd
+31977aad5fed703735d83dbb04524345 *man/vgam.control.Rd
+3901a430c138688b96027a1c8a96c4fd *man/vglm-class.Rd
+6e640c3fde4c99c2984a4c7612c019cb *man/vglm.Rd
+ad5684fc42b1f1f5cc881f6e7d49019d *man/vglm.control.Rd
+f57f8703ffce527c50bc9297fe5dd94f *man/vglmff-class.Rd
+9d43253faca810a9baa7f654ac7792b3 *man/vonmises.Rd
+33d0f6c4c20377940add441c4d482e78 *man/vsmooth.spline.Rd
+c498f29d7fc8156fd345b4892f02190d *man/waitakere.Rd
+e4d3a522ebb0edad3f9f8261d8f40d93 *man/wald.Rd
+651416c8a31226aebba2e11b5a091cdf *man/weibull.Rd
+e3068604e1a1986a32e83c891782a70a *man/weightsvglm.Rd
+a1fd4bb94558a6ebde1ed7e07a717956 *man/wffc.P2star.Rd
+cdd118028d235ad90e2351163b9ac670 *man/wffc.Rd
+31c7ead90337df892da898d01168b4b2 *man/wffc.indiv.Rd
ce03a749bcb5428662ac78b85bd6f08d *man/wffc.nc.Rd
664d89e742974a4be71a459a68bbfc80 *man/wffc.teams.Rd
655258cff21a67e1549b204ff3d451a5 *man/wrapup.smart.Rd
-5c74881dfc6fd864449dfd0d8c720386 *man/xs.nz.Rd
-18bd4b883004bccce4c1d1c5d80bff98 *man/yeo.johnson.Rd
-e397c38e07fedf212775293198657da3 *man/yip88.Rd
-8e94dc10a59629c0f9147f940a371a84 *man/yulesimon.Rd
-1475d89bd0a33754d7f91bafdd340299 *man/yulesimonUC.Rd
-f64c6703e51cc24766ce5dc033b0ac3e *man/zabinomUC.Rd
-7f8fef37516d696a7b685f570c6cb202 *man/zabinomial.Rd
-a7788666a974919ff5b10692bc08a38b *man/zageomUC.Rd
-ffb759533fb11daa037a82826284c9d1 *man/zageometric.Rd
-acb519fd6da2d0bb67539f963310618a *man/zanegbinUC.Rd
-14f25ecee890bda5089e1b21158ee374 *man/zanegbinomial.Rd
-9de32f6cc8bc406ecdfa00d343b796e6 *man/zapoisUC.Rd
-a2d4334c39fb98b5612df57a414c7bd1 *man/zapoisson.Rd
-109b41d0929fdd2fea23bfa1ed23207d *man/zero.Rd
-4e19a9181d3ce167b113abb5712489bb *man/zeta.Rd
+114a4ad6e5056896cd22d7558fc5b520 *man/xs.nz.Rd
+bcb9181a6ca8398fefd44de6552a8938 *man/yeo.johnson.Rd
+e3116eb4708dc7d3a6afdb76e3705284 *man/yip88.Rd
+21a90fbde0228b4e74bba93b50300b54 *man/yulesimon.Rd
+a6128b966f2d5d6df5f36b11bc2c3607 *man/yulesimonUC.Rd
+702b59c0ff9a17b02e63efbe7451ef34 *man/zabinomUC.Rd
+2f6dffea54d337e1ed60f267388557e9 *man/zabinomial.Rd
+7fdb1e52df331edbf0e234b7f455a9e0 *man/zageomUC.Rd
+27960c593ab3e907048e7ef7523b1efb *man/zageometric.Rd
+cbc82d4435bdb4bcf8d8c4a2d5a9e483 *man/zanegbinUC.Rd
+a214209935a1a86d8129d38fe37cc05c *man/zanegbinomial.Rd
+ce015717ce27f27018754d67e3316957 *man/zapoisUC.Rd
+035de7769a8dabd54be20e64592e0bd4 *man/zapoisson.Rd
+61cce538df41d42d6e5daf8f37635527 *man/zero.Rd
+dc4cfc56ff0924b05ad0af86d916c23b *man/zeta.Rd
e0ef189ae8251b5e0d20b614c18cdd5a *man/zetaUC.Rd
-41b60aab45c01599e769a721da58ea86 *man/zetaff.Rd
-e5afe0b17fcaa9b76a65041923bd16d2 *man/zibinomUC.Rd
-01f756bb5ae0f72629faaf2035539e70 *man/zibinomial.Rd
-ac50e58f22d511a8b288f3a3f84bfb5f *man/zigeomUC.Rd
-e407a1f99753be923e2f1a1c512aa72d *man/zigeometric.Rd
-2410e68bca42fa95ee6d2347025bf21c *man/zinegbinUC.Rd
-2aa7fce4177b3599057a728d77c94f58 *man/zinegbinomial.Rd
-8548bc081e80aa464b3a4ffbf0a043f7 *man/zipebcom.Rd
-fe5ca22b6582340e5d6f4542c99446ae *man/zipf.Rd
-84b96ae71fbc091562e27a5997446aa5 *man/zipfUC.Rd
-7f91486b2e334088be2b61ec5ba187f6 *man/zipoisUC.Rd
-6714335e60bbb877ba24d424d186c8ba *man/zipoisson.Rd
+86813485832ea3097bccb17a30752861 *man/zetaff.Rd
+2dcc3a027d670144db7a96b4ccf48949 *man/zibinomUC.Rd
+6dab9406e35eba935bb67ff6c39c4b2e *man/zibinomial.Rd
+eac0a99dd131fe06d3ed428eb3f4c515 *man/zigeomUC.Rd
+a49780b1594cd24043384312ccf975ad *man/zigeometric.Rd
+5a3c5dfb9a9340b0cbd930e1c3c30ad0 *man/zinegbinUC.Rd
+810f6051f65319950eaf7b623db4d357 *man/zinegbinomial.Rd
+3fac9599b8980c7ed980519facd5dfda *man/zipebcom.Rd
+e8e65cb1b0a3b7ae3bfb81222966024d *man/zipf.Rd
+15d3e6361ff82acece70960b06e13d1b *man/zipfUC.Rd
+e06712314cd3b09f403cfd0aea0b4b31 *man/zipoisUC.Rd
+e3bd4c85369f4fe2cc8d7996a792660f *man/zipoisson.Rd
4aaf5efcfbcf1bdf32b13f632ac3ed0f *src/caqo3.c
69d2fd2a25229e368e8cf93ed005f14f *src/fgam.f
f8fe99dcda865eceb06b66f4976f4bf2 *src/gautr.c
diff --git a/NAMESPACE b/NAMESPACE
index 511566c..8eab56d 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -7,8 +7,25 @@
useDynLib(VGAM)
+export(link2list)
+export(mlogit)
+export(perks, dperks, pperks, qperks, rperks)
+export(gumbelII, dgumbelII, pgumbelII, qgumbelII, rgumbelII)
+export(makeham, dmakeham, pmakeham, qmakeham, rmakeham)
+export(gompertz, dgompertz, pgompertz, qgompertz, rgompertz)
+export(lindley, dlind, plind, rlind)
+
+
+export(w.wz.merge, w.y.check, vweighted.mean.default)
+export(is.parallel.matrix, is.parallel.vglm,
+ is.zero.matrix, is.zero.vglm)
+exportMethods(is.parallel, is.zero)
+
+
+export(nvar_vlm)
+
importFrom("stats4", nobs)
exportMethods(nobs)
@@ -44,7 +61,7 @@ export(lrtest, lrtest_vglm)
export(update_default, update_formula)
-export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.cao, nvar.rcam)
+export(nvar, nvar.vlm, nvar.vgam, nvar.rrvglm, nvar.qrrvglm, nvar.cao, nvar.rcim)
export( nobs.vlm)
@@ -90,8 +107,8 @@ explogarithmic, dexplog, pexplog, qexplog, rexplog)
-export(Rcam, plotrcam0,
-rcam, summaryrcam)
+export(Rcim, plotrcim0,
+rcim, summaryrcim)
export(moffset)
export(plotqvar, Qvar)
export(depvar, depvar.vlm)
@@ -173,7 +190,6 @@ AICvlm, AICvgam, AICrrvglm,
AICqrrvglm, # AICvglm,
anova.vgam,
anova.vglm,
-beta4,
bisa, dbisa, pbisa, qbisa, rbisa,
betabinomial.ab, betabinomial,
dexpbinomial,
@@ -456,7 +472,8 @@ deexp, peexp, qeexp, reexp)
export(
meplot, meplot.default, meplot.vlm,
guplot, guplot.default, guplot.vlm,
-negbinomial, negbinomial.size, polya, normal1, nbcanlink,
+negbinomial, negbinomial.size, polya, normal1,
+nbcanlink,
tobit, dtobit, ptobit, qtobit, rtobit,
Opt,
perspqrrvglm, plotdeplot.lmscreg, plotqrrvglm, plotqtplot.lmscreg,
@@ -514,7 +531,7 @@ export(DeLury,
exportClasses(vglmff, vlm, vglm, vgam,
-rrvglm, qrrvglm, grc, rcam,
+rrvglm, qrrvglm, grc, rcim,
vlmsmall, uqo, cao,
summary.vgam, summary.vglm, summary.vlm,
summary.qrrvglm,
diff --git a/NEWS b/NEWS
index c7b3e45..256dccf 100755
--- a/NEWS
+++ b/NEWS
@@ -1,11 +1,94 @@
**************************************************
* *
- * 0.8 SERIES NEWS *
+ * 0.9 SERIES NEWS *
* *
**************************************************
+ CHANGES IN VGAM VERSION 0.9-0
+
+NEW FEATURES
+
+ o Major change: VGAM family functions no longer have
+ arguments such as earg, escale, eshape, etc. Arguments such
+ as offset that used to be passed in via those arguments can
+ be done directly through the link function. For example,
+ gev(lshape = "logoff", eshape = list(offset = 0.5)) is
+ replaced by gev(lshape = logoff(offset = 0.5)). The @misc
+ slot retains the $link and $earg components, however,
+ the latter is in a different format. Functions such as
+ dtheta.deta(), d2theta.deta2(), eta2theta(), theta2eta()
+ have been modified. Link functions have been simplified
+ somewhat. The casual user will probably not be affected,
+ but programmers will. Sorry about this!
+ o New VGAM family functions:
+ [dpqr]gompertz(), [dpqr]gumbelII(), [dpr]lindley(),
+ [dpqr]makeham(), [dpqr]perks().
+ o df.residual() supports a new formula/equation for 'type = "lm"'.
+ o garma("reciprocal") supported.
+ o is.parallel() for constraint matrices summary.
+ o Improved family functions:
+ these can handle multiple responses:
+ benini(), chisq(), erlang(), exponential(), gamma1(), geometric(),
+ gpd(), inv.gaussianff(), logff(), maxwell(), rayleigh(),
+ yulesimon(), zetaff().
+ o New data set: hormone
+ [http://www.stat.tamu.edu/~carroll/data/hormone_data.txt].
+ o If a factor response is not ordered then a warning
+ is issued for acat(), cratio(), cumulative() and sratio().
+ o New dpqr-type functions:
+ [dpqr]perks(), [dpqr]mperks(), [dpqr]mbeard().
+ o Argument 'parallel' added to gamma2().
+ o New link functions: mlogit().
+
+
+BUG FIXES and CHANGES
+
+ o zibinomial() had 1 wrong element in the EIM; one of the
+ corrections of VGAM 0.8-4 was actually incorrect.
+ o zibinomial() blurb was wrong:
+ previously was "(1 - pstr0) * prob / (1 - (1 - prob)^w)" where
+ prob is the mean of the ordinary binomial distribution.
+ Now is "(1 - pstr0) * prob".
+  o betaff() no longer has "A" and "B" arguments; they are
+    extracted from "lmu = elogit(min = A, max = B)".
+ o binom2.rho() has "lmu" as a new argument 2.
+  o logistic2() has zero = -2 as default, and can handle
+ multiple responses.
+ o gengamma() returned the wrong mean (picked up by Andrea Venturini):
+ not b * k but b * gamma(k + 1 / d) / gamma(k).
+  o tobit.Rd now states vector values for 'Lower' and 'Upper'
+ are permitted. Also, the @misc$Lower and @misc$Upper are
+ matrices of the same dimension as the response.
+ o constraints.vlm(type = c("vlm", "lm")) has been changed to
+ constraints.vlm(type = c("lm", "term")) [respectively].
+ o Rcam() renamed to Rcim(), and rcam() renamed to rcim().
+ Class "rcam" changed to "rcim".
+ o Days changed from "Monday" to "Mon" in all crash data frames, etc.
+ o w.wz.merge() written to handle the working weights
+ for multiple responses.
+ w.y.check() written to check the integrity of prior
+ weights and response.
+ o Argument 'sameScale' changed to 'eq.scale',
+ 'quantile.probs' in negbinomial-type families changed to 'probs.y'.
+ o No more warnings: dirmultinomial().
+ o Renamed arguments: benini(earg <- eshape),
+ binormal(equalmean <- eq.mean),
+ binormal(equalsd <- eq.sd),
+ o dirmultinomial() can handle a 1-row response [thanks to Peng Yu].
+ o weibull() gives improved warnings re. the shape parameter wrt
+ regularity conditions.
+ o The 12 most time-consuming examples have been placed in a
+ \dontrun{} to save time.
+ o Argument "prob.x" renamed to "probs.x".
+ o Argument "hbw" removed from iam().
+  o Argument 'name' passed into .C and .Fortran() [in dotC()
+    and dotFortran()] is now okay because the first argument
+    is unnamed.
+
+
+
CHANGES IN VGAM VERSION 0.8-7
NEW FEATURES
@@ -37,8 +120,6 @@ BUG FIXES and CHANGES
-
-
CHANGES IN VGAM VERSION 0.8-6
NEW FEATURES
diff --git a/R/Links.R b/R/Links.R
new file mode 100644
index 0000000..865df57
--- /dev/null
+++ b/R/Links.R
@@ -0,0 +1,236 @@
+# These functions are
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ dtheta.deta <-
+ function(theta,
+ link = "identity",
+ earg = list(theta = theta, # Needed
+ inverse = FALSE,
+ deriv = 1,
+ short = TRUE,
+ tag = FALSE)) {
+
+
+ function.name <- link
+
+ function.name2 <- attr(earg, "function.name")
+ if (length(function.name2) && function.name != function.name2) {
+ warning("apparent conflict in name of link function")
+ }
+
+ earg[["theta"]] <- theta # New data
+
+ earg[["deriv"]] <- 1 # New
+
+
+ do.call(what = function.name, args = earg)
+}
+
+
+
+
+
+ d2theta.deta2 <-
+ function(theta,
+ link = "identity",
+ earg = list(theta = theta, # Needed
+ inverse = FALSE,
+ deriv = 2,
+ short = TRUE,
+ tag = FALSE)) {
+
+
+ function.name <- link
+
+ function.name2 <- attr(earg, "function.name")
+ if (length(function.name2) && function.name != function.name2)
+ warning("apparent conflict in name of link function in D2theta.deta2")
+
+ earg[["theta"]] <- theta # New data
+
+ earg[["deriv"]] <- 2 # New
+
+ do.call(what = function.name, args = earg)
+}
+
+
+
+ theta2eta <-
+ function(theta,
+ link = "identity",
+ earg = list(theta = NULL)) {
+
+ function.name <- link
+
+ function.name2 <- attr(earg, "function.name")
+ if (length(function.name2) && function.name != function.name2)
+ warning("apparent conflict in name of link function")
+
+ earg[["theta"]] <- theta # New data
+
+ do.call(what = function.name, args = earg)
+}
+
+
+
+
+ eta2theta <-
+ function(theta, # This is really eta.
+ link = "identity",
+ earg = list(theta = NULL)) {
+
+
+ orig.earg <- earg
+ if (!is.list(earg))
+ stop("argument 'earg' is not a list")
+
+ level1 <- length(earg) > 3 &&
+ length(intersect(names(earg),
+ c("theta", "inverse", "deriv", "short", "tag"))) > 3
+
+ if (level1)
+ earg <- list(oneOnly = earg)
+
+
+
+
+
+
+
+ llink <- length(link)
+
+ if (llink != length(earg))
+ stop("length of argument 'link' differs from ",
+ "length of argument 'earg'")
+ if (llink == 0)
+ stop("length(earg) == 0 not allowed")
+
+
+ if (llink == 1) { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+
+
+ if (is.list(earg[[1]]))
+ earg <- earg[[1]]
+
+ function.name <- link
+
+ function.name2 <- attr(earg, "function.name") # May be, e.g., NULL
+ if (length(function.name2) && function.name != function.name2)
+ warning("apparent conflict in name of link function")
+
+
+ earg[["theta"]] <- theta # New data
+
+ earg[["inverse"]] <- TRUE # New
+
+ return(do.call(what = function.name, args = earg))
+ }
+
+
+
+
+
+
+
+
+
+ if (!is.matrix(theta) &&
+ length(theta) == length(earg))
+ theta <- rbind(theta)
+
+
+ ans <- NULL
+ for(iii in 1:llink) {
+ use.earg <- earg[[iii]]
+ use.earg[["inverse"]] <- TRUE # New
+ use.earg[["theta"]] <- theta[, iii] # New
+ use.function.name <- link[iii]
+
+ ans <- cbind(ans, do.call(what = use.function.name,
+ args = use.earg))
+ }
+
+ if (length(orig.earg) == ncol(ans) &&
+ length(names(orig.earg)) > 0 &&
+ ncol(ans) > 0)
+ colnames(ans) <- names(orig.earg)
+
+ ans
+}
+
+
+
+
+ namesof <- function(theta,
+ link = "identity",
+ earg = list(tag = tag, short = short),
+ tag = FALSE,
+ short = TRUE) {
+
+
+ earg[["theta"]] <- as.character(theta)
+ earg[["tag"]] <- tag
+ earg[["short"]] <- short
+ do.call(link, args = earg)
+}
+
+
+
+
+
+
+link2list <- function(link
+ ) {
+
+ ans <- link
+
+ fun.name <- as.character(ans[[1]])
+
+
+ big.list <- as.list(as.function(get(fun.name)))
+
+
+ big.list[[length(big.list)]] <- NULL # Kill the body of code
+
+
+
+
+
+ t.index <- pmatch(names(ans[-1]), names(big.list))
+ t.index
+ if (any(is.na(t.index)))
+ stop("in '", fun.name, "' could not match argument(s) ",
+ paste('"', names(ans[-1])[is.na(t.index)], '"', sep = "",
+ collapse = ", "))
+
+
+ Big.list <- big.list
+ trivial.call <- (length(t.index) == 0)
+ if (!trivial.call) {
+ Big.list[t.index] <- ans[-1]
+ }
+
+
+ attr(Big.list, "function.name") <- fun.name
+
+
+ Big.list
+}
+
+
+
+
diff --git a/R/aamethods.q b/R/aamethods.q
index 766236c..eb3113d 100644
--- a/R/aamethods.q
+++ b/R/aamethods.q
@@ -9,11 +9,13 @@
-is.Numeric <- function(x, allowable.length=Inf, integer.valued=FALSE, positive=FALSE)
+is.Numeric <- function(x, allowable.length = Inf,
+ integer.valued = FALSE, positive = FALSE)
if (all(is.numeric(x)) && all(is.finite(x)) &&
- (if(is.finite(allowable.length)) length(x)==allowable.length else TRUE) &&
- (if(integer.valued) all(x==round(x)) else TRUE) &&
- (if(positive) all(x>0) else TRUE)) TRUE else FALSE
+ (if (is.finite(allowable.length))
+ length(x) == allowable.length else TRUE) &&
+ (if (integer.valued) all(x == round(x)) else TRUE) &&
+ (if (positive) all(x>0) else TRUE)) TRUE else FALSE
VGAMenv <- new.env()
@@ -22,7 +24,8 @@ VGAMenv <- new.env()
-.onLoad <- function(lib, pkg) require(methods) # 25/1/05
+.onLoad <- function(lib, pkg)
+ require(methods) # 25/1/05
@@ -53,7 +56,7 @@ setClass("vglmff", representation(
"deviance" = "function",
"fini" = "expression",
"first" = "expression",
- "infos" = "function", # Added 20101203
+ "infos" = "function", # Added 20101203
"initialize" = "expression",
"last" = "expression",
"linkfun" = "function",
@@ -64,7 +67,7 @@ setClass("vglmff", representation(
"summary.dispersion" = "logical",
"vfamily" = "character",
"deriv" = "expression",
- "weight" = "expression"), # "call"
+ "weight" = "expression"), # "call"
prototype = .VGAM.prototype.list)
@@ -318,17 +321,17 @@ new("vglm", "extra"=from at extra,
- setClass("rcam0", representation(not.needed = "numeric"),
+ setClass("rcim0", representation(not.needed = "numeric"),
contains = "vglm") # Added 20110506
- setClass("rcam", representation(not.needed = "numeric"),
+ setClass("rcim", representation(not.needed = "numeric"),
contains = "rrvglm")
setClass("grc", representation(not.needed = "numeric"),
contains = "rrvglm")
-setMethod("summary", "rcam",
+setMethod("summary", "rcim",
function(object, ...)
- summary.rcam(object, ...))
+ summary.rcim(object, ...))
setMethod("summary", "grc",
function(object, ...)
@@ -463,8 +466,8 @@ if (!isGeneric("residuals"))
if (!isGeneric("weights"))
- setGeneric("weights", function(object, ...) standardGeneric("weights"),
- package = "VGAM")
+ setGeneric("weights", function(object, ...)
+ standardGeneric("weights"), package = "VGAM")
diff --git a/R/coef.vlm.q b/R/coef.vlm.q
index 439cf8d..7d57ebd 100644
--- a/R/coef.vlm.q
+++ b/R/coef.vlm.q
@@ -9,42 +9,42 @@
coefvlm <- function(object, matrix.out = FALSE, label = TRUE) {
- ans <- object at coefficients
- if (!label)
- names(ans) <- NULL
- if (!matrix.out)
- return(ans)
+ ans <- object at coefficients
+ if (!label)
+ names(ans) <- NULL
+ if (!matrix.out)
+ return(ans)
- ncolx <- object at misc$p # = length(object at constraints)
- M <- object at misc$M
-
- Blist <- object at constraints
- if (all(trivial.constraints(Blist) == 1)) {
- Bmat <- matrix(ans, nrow=ncolx, ncol = M, byrow = TRUE)
- } else {
- Bmat <- matrix(as.numeric(NA), nrow = ncolx, ncol = M)
-
- if (!matrix.out)
- return(ans)
-
- ncolBlist <- unlist(lapply(Blist, ncol))
- nasgn <- names(Blist)
- temp <- c(0, cumsum(ncolBlist))
- for(ii in 1:length(nasgn)) {
- index <- (temp[ii]+1):temp[ii+1]
- cmat <- Blist[[nasgn[ii]]]
- Bmat[ii,] <- cmat %*% ans[index]
- }
- }
+ ncolx <- object at misc$p # = length(object at constraints)
+ M <- object at misc$M
+
+ Blist <- object at constraints
+ if (all(trivial.constraints(Blist) == 1)) {
+ Bmat <- matrix(ans, nrow = ncolx, ncol = M, byrow = TRUE)
+ } else {
+ Bmat <- matrix(as.numeric(NA), nrow = ncolx, ncol = M)
- if (label) {
- d1 <- object at misc$colnames.x
- d2 = object at misc$predictors.names # Could be NULL
- dimnames(Bmat) <- list(d1, d2)
+ if (!matrix.out)
+ return(ans)
+
+ ncolBlist <- unlist(lapply(Blist, ncol))
+ nasgn <- names(Blist)
+ temp <- c(0, cumsum(ncolBlist))
+ for(ii in 1:length(nasgn)) {
+ index <- (temp[ii] + 1):temp[ii + 1]
+ cmat <- Blist[[nasgn[ii]]]
+ Bmat[ii,] <- cmat %*% ans[index]
}
+ }
+
+ if (label) {
+ d1 <- object at misc$colnames.x
+ d2 <- object at misc$predictors.names # Could be NULL
+ dimnames(Bmat) <- list(d1, d2)
+ }
- Bmat
+ Bmat
} # end of coefvlm
@@ -62,8 +62,10 @@ setMethod("coef", "vglm", function(object, ...)
-setMethod("coefficients", "summary.vglm", function(object, ...) object at coef3)
-setMethod("coef", "summary.vglm", function(object, ...) object at coef3)
+setMethod("coefficients", "summary.vglm", function(object, ...)
+ object at coef3)
+setMethod("coef", "summary.vglm", function(object, ...)
+ object at coef3)
@@ -74,30 +76,41 @@ setMethod("coef", "summary.vglm", function(object, ...) object at coef3)
Coef.vlm <- function(object, ...) {
- LL <- length(object at family@vfamily)
- funname = paste("Coef.", object at family@vfamily[LL], sep="")
- if (exists(funname)) {
- newcall = paste("Coef.", object at family@vfamily[LL],
- "(object, ...)", sep="")
- newcall = parse(text=newcall)[[1]]
- eval(newcall)
- } else
- if (length(tmp2 <- object at misc$link) &&
- object at misc$intercept.only &&
- trivial.constraints(object at constraints)) {
-
- answer = eta2theta(rbind(coefvlm(object)),
- link = object at misc$link,
- earg = object at misc$earg)
- answer = c(answer)
- if (length(ntmp2 <- names(tmp2)) == object at misc$M)
- names(answer) = ntmp2
- answer
- } else {
- coefvlm(object, ... )
- }
+
+
+ LL <- length(object at family@vfamily)
+ funname <- paste("Coef.", object at family@vfamily[LL], sep = "")
+
+ if (exists(funname)) {
+ newcall <- paste("Coef.", object at family@vfamily[LL],
+ "(object, ...)", sep = "")
+ newcall <- parse(text = newcall)[[1]]
+ eval(newcall)
+ } else
+ if (length(tmp2 <- object at misc$link) &&
+ object at misc$intercept.only &&
+ trivial.constraints(object at constraints)) {
+
+
+ answer <-
+ eta2theta(rbind(coefvlm(object)),
+ link = object at misc$link,
+ earg = object at misc$earg)
+
+
+ answer <- c(answer)
+ if (length(ntmp2 <- names(tmp2)) == object at misc$M)
+ names(answer) <- ntmp2
+ answer
+ } else {
+ coefvlm(object, ... )
+ }
}
+
+
+
+
setMethod("Coefficients", "vlm", function(object, ...)
Coef.vlm(object, ...))
setMethod("Coef", "vlm", function(object, ...)
diff --git a/R/deviance.vlm.q b/R/deviance.vlm.q
index 8db09b2..5a4b7b5 100644
--- a/R/deviance.vlm.q
+++ b/R/deviance.vlm.q
@@ -6,6 +6,7 @@
+
deviance.vlm <- function(object, ...)
object at criterion$deviance
@@ -32,9 +33,11 @@ setMethod("deviance", "vglm", function(object, ...)
df.residual_vlm <- function(object, type = c("vlm", "lm"), ...) {
type <- type[1]
+
+
switch(type,
vlm = object at df.residual,
- lm = nobs(object, type = "lm") - nvar(object, type = "lm"),
+ lm = nobs(object, type = "lm") - nvar_vlm(object, type = "lm"),
stop("argument 'type' unmatched"))
}
@@ -47,4 +50,99 @@ setMethod("df.residual", "vlm", function(object, ...)
+nvar_vlm <- function(object, ...) {
+
+
+ M = npred(object)
+ allH = matrix(unlist(constraints(object, type = "lm")), nrow = M)
+ checkNonZero = function(m) sum(as.logical(m))
+ numPars = apply(allH, 1, checkNonZero)
+ if (length(object at misc$predictors.names) == M)
+ names(numPars) = object at misc$predictors.names
+
+
+ NumPars = rep(0, length = M)
+ for (jay in 1:M) {
+ X_lm_jay = model.matrix(object, type = "lm", lapred.index = jay)
+ NumPars[jay] = ncol(X_lm_jay)
+ }
+ if (length(object at misc$predictors.names) == M)
+ names(NumPars) = object at misc$predictors.names
+ if (!all(NumPars == numPars)) {
+ print(NumPars - numPars) # Should be all 0s
+ stop("something wrong in nvar_vlm()")
+ }
+
+ numPars
+}
+
+
+
+
+
+
+
+
+
+
+
+
+if (FALSE) {
+
+
+set.seed(123)
+zapdat = data.frame(x2 = runif(nn <- 2000))
+zapdat = transform(zapdat, p0 = logit(-0.5 + 1*x2, inverse = TRUE),
+ lambda = loge( 0.5 + 2*x2, inverse = TRUE),
+ f1 = gl(4, 50, labels = LETTERS[1:4]),
+ x3 = runif(nn))
+zapdat = transform(zapdat, y = rzapois(nn, lambda, p0))
+with(zapdat, table(y))
+
+
+fit1 = vglm(y ~ x2, zapoisson, zapdat, trace = TRUE)
+fit1 = vglm(y ~ bs(x2), zapoisson, zapdat, trace = TRUE)
+coef(fit1, matrix = TRUE) # These should agree with the above values
+
+
+fit2 = vglm(y ~ bs(x2) + x3, zapoisson(zero = 2), zapdat, trace = TRUE)
+coef(fit2, matrix = TRUE)
+
+
+clist = list("(Intercept)" = diag(2), "x2" = rbind(0,1),
+ "x3" = rbind(1,0))
+fit3 = vglm(y ~ x2 + x3, zapoisson(zero = NULL), zapdat,
+ constraints = clist, trace = TRUE)
+coef(fit3, matrix = TRUE)
+
+
+constraints(fit2, type = "term")
+constraints(fit2, type = "lm")
+head(model.matrix(fit2, type = "term"))
+head(model.matrix(fit2, type = "lm"))
+
+
+
+
+allH = matrix(unlist(constraints(fit1)), nrow = fit1 at misc$M)
+allH = matrix(unlist(constraints(fit2)), nrow = fit2 at misc$M)
+allH = matrix(unlist(constraints(fit3)), nrow = fit3 at misc$M)
+
+
+checkNonZero = function(m) sum(as.logical(m))
+
+(numPars = apply(allH, 1, checkNonZero))
+
+
+nvar_vlm(fit1)
+nvar_vlm(fit2)
+nvar_vlm(fit3)
+
+
+}
+
+
+
+
+
diff --git a/R/family.actuary.R b/R/family.actuary.R
new file mode 100644
index 0000000..bc700b2
--- /dev/null
+++ b/R/family.actuary.R
@@ -0,0 +1,2090 @@
+# These functions are
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# All rights reserved.
+
+
+
+
+
+
+
+
+
+
+
+
+
+dgumbelII <- function(x, shape, scale = 1, log = FALSE) {
+
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ LLL <- max(length(x), length(shape), length(scale))
+ if (length(x) != LLL) x <- rep(x, length.out = LLL)
+ if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
+ if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+
+
+ ans <- x
+ index0 <- (x < 0) & is.finite(x) & !is.na(x)
+
+ ans[!index0] <- log(shape[!index0] / scale[!index0]) +
+ (shape[!index0] + 1) * log(scale[!index0] / x[!index0]) -
+ (x[!index0] / scale[!index0])^(-shape[!index0])
+ ans[index0] <- log(0)
+ ans[x == Inf] <- log(0)
+
+ if (log.arg) {
+ } else {
+ ans <- exp(ans)
+ ans[index0] <- 0
+ ans[x == Inf] <- 0
+ }
+ ans[shape <= 0 | scale <= 0] <- NaN
+ ans
+}
+
+
+pgumbelII <- function(q, shape, scale = 1) {
+
+ LLL <- max(length(q), length(shape), length(scale))
+ if (length(q) != LLL) q <- rep(q, length.out = LLL)
+ if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
+ if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+
+
+ ans <- exp(-(q / scale)^(-shape))
+ ans[(q <= 0)] <- 0
+ ans[shape <= 0 | scale <= 0] <- NaN
+ ans[q == Inf] <- 1
+ ans
+}
+
+
+
+qgumbelII <- function(p, shape, scale = 1) {
+
+ LLL <- max(length(p), length(shape), length(scale))
+ if (length(p) != LLL) p <- rep(p, length.out = LLL)
+ if (length(shape) != LLL) shape <- rep(shape, length.out = LLL)
+ if (length(scale) != LLL) scale <- rep(scale, length.out = LLL)
+
+
+ ans <- scale * (-log(p))^(-1 / shape)
+ ans[p < 0] <- NaN
+ ans[p == 0] <- 0
+ ans[p == 1] <- Inf
+ ans[p > 1] <- NaN
+ ans[shape <= 0 | scale <= 0] <- NaN
+ ans
+}
+
+
+rgumbelII <- function(n, shape, scale = 1) {
+ qgumbelII(runif(n), shape = shape, scale = scale)
+}
+
+
+
+
+
+
+
+
+
+ gumbelII <-
+ function(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ probs.y = c(0.2, 0.5, 0.8),
+ perc.out = NULL, # 50,
+ imethod = 1, zero = -2)
+{
+
+
+ lshape <- as.list(substitute(lshape))
+ e.shape <- link2list(lshape)
+ l.shape <- attr(e.shape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ e.scale <- link2list(lscale)
+ l.scale <- attr(e.scale, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (!is.Numeric(probs.y, positive = TRUE) ||
+ length(probs.y) < 2 ||
+ max(probs.y) >= 1)
+ stop("bad input for argument 'probs.y'")
+ if (length(perc.out))
+ if (!is.Numeric(perc.out, positive = TRUE) ||
+ max(probs.y) >= 100)
+ stop("bad input for argument 'perc.out'")
+
+
+ if (length(ishape))
+ if (!is.Numeric(ishape, positive = TRUE))
+ stop("argument 'ishape' values must be positive")
+ if (length(iscale))
+ if (!is.Numeric(iscale, positive = TRUE))
+ stop("argument 'iscale' values must be positive")
+
+
+ new("vglmff",
+ blurb = c("Gumbel Type II distribution\n\n",
+ "Links: ",
+ namesof("shape", l.shape, e.shape), ", ",
+ namesof("scale", l.scale, e.scale), "\n",
+ "Mean: scale^(1/shape) * gamma(1 - 1 / shape)\n",
+ "Variance: scale^(2/shape) * (gamma(1 - 2/shape) - ",
+ "gamma(1 + 1/shape)^2)"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ perc.out = .perc.out ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .perc.out = perc.out
+ ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+
+
+ predictors.names <-
+ c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
+ namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+ Shape.init <- matrix(if(length( .ishape )) .ishape else 0 + NA,
+ n, ncoly, byrow = TRUE)
+ Scale.init <- matrix(if(length( .iscale )) .iscale else 0 + NA,
+ n, ncoly, byrow = TRUE)
+
+ if (!length(etastart)) {
+ if (!length( .ishape ) ||
+ !length( .iscale )) {
+ for (ilocal in 1:ncoly) {
+
+ anyc <- FALSE # extra$leftcensored | extra$rightcensored
+ i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
+ probs.y <- .probs.y
+ xvec <- log(-log(probs.y))
+ fit0 <- lsfit(y = xvec,
+ x = log(quantile(y[!i11, ilocal],
+ probs = probs.y )))
+
+
+ if (!is.Numeric(Shape.init[, ilocal]))
+ Shape.init[, ilocal] <- -fit0$coef["X"]
+ if (!is.Numeric(Scale.init[, ilocal]))
+ Scale.init[, ilocal] <-
+ exp(fit0$coef["Intercept"] / Shape.init[, ilocal])
+ } # ilocal
+
+ etastart <-
+ cbind(theta2eta(Shape.init, .l.shape , .e.shape ),
+ theta2eta(Scale.init, .l.scale , .e.scale ))[,
+ interleave.VGAM(M, M = Musual)]
+ }
+ }
+ }), list(
+ .l.scale = l.scale, .l.shape = l.shape,
+ .e.scale = e.scale, .e.shape = e.shape,
+ .iscale = iscale, .ishape = ishape,
+ .probs.y = probs.y,
+ .imethod = imethod ) )),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ Shape <- as.matrix(Shape)
+
+ if (length( .perc.out ) > 1 && ncol(Shape) > 1)
+ stop("argument 'perc.out' should be of length one since ",
+ "there are multiple responses")
+
+ if (!length( .perc.out )) {
+ return(Scale * gamma(1 - 1 / Shape))
+ }
+
+ ans <- if (length( .perc.out ) > 1) {
+ qgumbelII(p = matrix( .perc.out / 100, length(Shape),
+ length( .perc.out ), byrow = TRUE),
+ shape = Shape, scale = Scale)
+ } else {
+ qgumbelII(p = .perc.out / 100, shape = Shape, scale = Scale)
+ }
+ colnames(ans) <- paste(as.character( .perc.out ), "%", sep = "")
+ ans
+ }, list(
+ .l.scale = l.scale, .l.shape = l.shape,
+ .e.scale = e.scale, .e.shape = e.shape,
+ .perc.out = perc.out ) )),
+ last = eval(substitute(expression({
+
+
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .l.shape , length = ncoly),
+ rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .e.shape
+ misc$earg[[Musual*ii ]] <- .e.scale
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$perc.out <- .perc.out
+ misc$true.mu <- FALSE # @fitted is not a true mu
+
+
+ }), list(
+ .l.scale = l.scale, .l.shape = l.shape,
+ .e.scale = e.scale, .e.shape = e.shape,
+ .perc.out = perc.out,
+ .imethod = imethod ) )),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * dgumbelII(x = y, shape = Shape,
+ scale = Scale, log = TRUE))
+ }, list( .l.scale = l.scale, .l.shape = l.shape,
+ .e.scale = e.scale, .e.shape = e.shape
+ ) )),
+ vfamily = c("gumbelII"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+
+ dl.dshape <- 1 / Shape + log(Scale / y) -
+ log(Scale / y) * (Scale / y)^Shape
+ dl.dscale <- Shape / Scale - (Shape / y) * (Scale / y)^(Shape - 1)
+
+
+ dshape.deta <- dtheta.deta(Shape, .l.shape , .e.shape )
+ dscale.deta <- dtheta.deta(Scale, .l.scale , .e.scale )
+
+ myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
+ cbind(dshape.deta, dscale.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .l.scale = l.scale, .l.shape = l.shape,
+ .e.scale = e.scale, .e.shape = e.shape
+ ) )),
+ weight = eval(substitute(expression({
+ EulerM <- -digamma(1.0)
+
+
+ ned2l.dshape2 <- (1 + trigamma(2) + digamma(2)^2) / Shape^2
+ ned2l.dscale2 <- (Shape / Scale)^2
+ ned2l.dshapescale <- digamma(2) / Scale
+
+ wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
+
+ ind11 <- ind22 <- ind12 <- NULL
+ for (ii in 1:(M / Musual)) {
+ ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
+ ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
+ ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
+ }
+ wz[, ind11] <- ned2l.dshape2 * dshape.deta^2
+ wz[, ind22] <- ned2l.dscale2 * dscale.deta^2
+ wz[, ind12] <- ned2l.dshapescale * dscale.deta * dshape.deta
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ }), list( .l.scale = l.scale, .l.shape = l.shape ))))
+}
+
+
+
+
+
dmbeard <- function(x, shape, scale = 1, rho, epsilon, log = FALSE) {
  # Density of the modified Beard distribution on [0, Inf).
  # Returns the log-density when log = TRUE; nonpositive
  # shape/scale/rho/epsilon values give NaN.

  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  # Recycle every argument to a common length.
  nn <- max(length(x), length(shape), length(scale),
            length(rho), length(epsilon))
  x       <- rep(x,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  rho     <- rep(rho,     length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  neg.x <- (x < 0)  # density is zero on the negative half-line

  # Log-density; log1p() keeps the bracketed term accurate.
  logdens <-
    log(epsilon * exp(-x * scale) + shape) -
    epsilon * x -
    ((rho * epsilon - 1) / (rho * scale)) *
      (log1p(rho * shape) -
       log(exp(-x * scale) + rho * shape) - scale * x) -
    log(exp(-x * scale) + shape * rho)
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0 | rho <= 0 | epsilon <= 0] <- NaN
  ans
}
+
+
pmbeard <- function(q, shape, scale = 1, rho, epsilon) {
  # Distribution function of the modified Beard distribution.
  # expm1()/log1p() keep the result accurate when the cumulative
  # hazard is small.
  nn <- max(length(q), length(shape), length(scale),
            length(rho), length(epsilon))
  q       <- rep(q,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  rho     <- rep(rho,     length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  # F(q) = 1 - exp(-H(q)) with cumulative hazard H(q).
  cum.haz <- epsilon * q +
    ((rho * epsilon - 1) / (rho * scale)) *
      (log1p(rho * shape) -
       log(exp(-scale * q) + rho * shape) - scale * q)
  ans <- -expm1(-cum.haz)

  ans[q <= 0] <- 0
  ans[shape <= 0 | scale <= 0 | rho <= 0 | epsilon <= 0] <- NaN
  ans[q == Inf] <- 1  # deliberately after the NaN assignment, as before
  ans
}
+
+
+
+
+
+
+
dmperks <- function(x, shape, scale = 1, epsilon, log = FALSE) {
  # Density of the modified Perks distribution on [0, Inf).
  # Returns the log-density when log = TRUE; nonpositive
  # shape/scale/epsilon values give NaN.

  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  nn <- max(length(x), length(shape), length(scale), length(epsilon))
  x       <- rep(x,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  neg.x <- (x < 0)
  logdens <-
    log(epsilon * exp(-x * scale) + shape) -
    epsilon * x -
    ((epsilon - 1) / scale) *
      (log1p(shape) -
       log(shape + exp(-x * scale)) - x * scale) -
    log(exp(-x * scale) + shape)
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0 | epsilon <= 0] <- NaN
  ans
}
+
+
+
pmperks <- function(q, shape, scale = 1, epsilon) {
  # Distribution function of the modified Perks distribution.
  #
  # Fixes relative to the original:
  #  * 'epsilon' now participates in the common-length recycling
  #    (it was omitted from the max-length computation);
  #  * invalid epsilon (<= 0) now returns NaN, consistent with
  #    dmperks() and pmbeard().
  LLL <- max(length(q), length(shape), length(scale), length(epsilon))
  if (length(q)       != LLL) q       <- rep(q,       length.out = LLL)
  if (length(shape)   != LLL) shape   <- rep(shape,   length.out = LLL)
  if (length(scale)   != LLL) scale   <- rep(scale,   length.out = LLL)
  if (length(epsilon) != LLL) epsilon <- rep(epsilon, length.out = LLL)

  # F(q) = 1 - exp(-H(q)); expm1()/log1p() for accuracy.
  ans <- -expm1(-epsilon * q -
                ((epsilon - 1) / scale) *
                (log1p(shape) -
                 log(shape + exp(-q * scale)) - q * scale))

  ans[(q <= 0)] <- 0
  ans[shape <= 0 | scale <= 0 | epsilon <= 0] <- NaN
  ans[q == Inf] <- 1  # after the NaN assignment, as in the siblings
  ans
}
+
+
+
+
+
+
+
+
+
+
+
+
dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
  # Density-like function for the Beard distribution.
  # NOTE(review): this definition is shadowed by a second dbeard()
  # later in the file; the warning below is the author's own.
  warning("does not integrate to unity")

  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  nn <- max(length(x), length(shape), length(scale), length(rho))
  x     <- rep(x,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)
  rho   <- rep(rho,   length.out = nn)

  neg.x <- (x < 0)
  rho.pow <- rho^(-1 / scale)  # hoisted common subexpression
  logdens <- log(shape) - x * scale * rho.pow +
    log(rho) + log(scale) +
    rho.pow * log1p(shape * rho) -
    (1 + rho.pow) *
      log(shape * rho + exp(-x * scale))
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0 | rho <= 0] <- NaN
  ans
}
+
+
+
+
+
+
dbeard <- function(x, shape, scale = 1, rho, log = FALSE) {
  # Second (overriding) definition of dbeard(), written in the
  # (alpha, beta) = (shape, scale) parameterization.
  # NOTE(review): the 'log' argument is accepted but ignored,
  # exactly as in the original.
  alpha <- shape
  beta  <- scale

  warning("does not integrate to unity")

  dens <- ifelse(x <= 0 | beta <= 0,
                 NaN,
                 exp(alpha + beta * x) *
                   (1 + exp(alpha + rho))^(exp(-rho / beta)) /
                   (1 + exp(alpha + rho + beta * x))^(1 + exp(-rho / beta)))
  dens
}
+
+
+
qbeard <- function(x, u = 0.5, alpha = 1, beta = 1, rho = 1) {
  # Quantile-related helper for the Beard distribution.
  # NOTE(review): length(x) != length(u) is a scalar condition that
  # gets recycled elementwise inside the mask, as in the original.
  bad <- x <= 0 | u <= 0 | u >= 1 | length(x) != length(u) | beta <= 0
  ans <- ifelse(bad,
                NaN,
                (1 / beta) *
                  (log((u^(-beta * exp(rho))) *
                       (1 + exp(alpha + rho + beta * x)) - 1) -
                   alpha - rho) - x)

  return(ans)
}
+
+
+
+
+
+
+
+
+
+
dperks <- function(x, shape, scale = 1, log = FALSE) {
  # Density of the Perks distribution on [0, Inf); consistent with
  # pperks().  Returns the log-density when log = TRUE.
  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  nn <- max(length(x), length(shape), length(scale))
  x     <- rep(x,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  neg.x <- (x < 0)
  logdens <- log(shape) - x +
    log1p(shape) / scale -
    (1 + 1 / scale) * log(shape + exp(-x * scale))
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0] <- NaN
  ans
}
+
+
+
pperks <- function(q, shape, scale = 1) {
  # Distribution function of the Perks distribution, computed via
  # the log survival function with expm1() for accuracy.
  nn <- max(length(q), length(shape), length(scale))
  q     <- rep(q,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  log.surv <- -q + (log1p(shape) -
                    log(shape + exp(-q * scale))) / scale
  ans <- -expm1(log.surv)

  ans[q <= 0] <- 0
  ans[shape <= 0 | scale <= 0] <- NaN
  ans[q == Inf] <- 1  # after the NaN assignment, as in the original
  ans
}
+
+
qperks <- function(p, shape, scale = 1) {
  # Quantile function of the Perks distribution: closed-form
  # inversion of pperks() via the survival function.
  nn <- max(length(p), length(shape), length(scale))
  p     <- rep(p,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  scaled.ls <- scale * log1p(-p)      # scale * log(1 - p)
  ans <- (log1p(shape - exp(scaled.ls)) - log(shape) - scaled.ls) / scale

  # Boundary and invalid-input handling.
  ans[p < 0]  <- NaN
  ans[p == 0] <- 0
  ans[p > 1]  <- NaN
  ans[p == 1] <- Inf
  ans[shape <= 0 | scale <= 0] <- NaN
  ans
}
+
+
rperks <- function(n, shape, scale = 1) {
  # Random variates from the Perks distribution, by inversion.
  qperks(p = runif(n), shape = shape, scale = scale)
}
+
+
+
+
+
perks.control <- function(save.weight = TRUE, ...) {
  # Control list for perks(); save.weight = TRUE retains the working
  # weight matrices on the fit (the EIM is obtained by simulation).
  list(save.weight = save.weight)
}
+
+
 perks <-
  function(lshape = "loge", lscale = "loge",
           ishape = NULL, iscale = NULL,
           nsimEIM = 500,
           oim.mean = FALSE,
           zero = NULL)
{
  # VGAM family function for the 2-parameter Perks distribution.
  # Each response column contributes Musual = 2 interleaved linear
  # predictors, eta = (shape, scale).  The expected information
  # matrix is approximated by simulation (nsimEIM draws) in @weight.
  # 'ishape'/'iscale' are optional positive initial values; 'zero'
  # selects predictors modelled as intercept-only.

  # Convert the link specifications into (earg list, link name).
  lshape <- as.list(substitute(lshape))
  e.shape <- link2list(lshape)
  l.shape <- attr(e.shape, "function.name")

  lscale <- as.list(substitute(lscale))
  e.scale <- link2list(lscale)
  l.scale <- attr(e.scale, "function.name")

  if (!is.Numeric(nsimEIM, allowable.length = 1,
                  integer.valued = TRUE))
    stop("bad input for argument 'nsimEIM'")
  if (nsimEIM <= 50)
    warning("argument 'nsimEIM' should be an integer ",
            "greater than 50, say")

  # Initial values, when supplied, must be positive.
  if (length(ishape))
    if (!is.Numeric(ishape, positive = TRUE))
      stop("argument 'ishape' values must be positive")
  if (length(iscale))
    if (!is.Numeric(iscale, positive = TRUE))
      stop("argument 'iscale' values must be positive")

  if (!is.logical(oim.mean) || length(oim.mean) != 1)
    stop("bad input for argument 'oim.mean'")

  new("vglmff",
  blurb = c("Perks' distribution\n\n",
            "Links: ",
            namesof("shape", l.shape, e.shape), ", ",
            namesof("scale", l.scale, e.scale), "\n",
            "Median: qperks(p = 0.5, shape, scale)"),

  # Constraint matrices: 'zero' marks intercept-only predictors.
  constraints = eval(substitute(expression({
    dotzero <- .zero
    Musual <- 2
    eval(negzero.expression)
  }), list( .zero = zero ))),

  infos = eval(substitute(function(...) {
    list(Musual = 2,
         nsimEIM = .nsimEIM,
         zero = .zero )
  }, list( .zero = zero,
           .nsimEIM = nsimEIM ))),

  initialize = eval(substitute(expression({
    # Standardize w and y; multiple positive responses allowed,
    # one column of weights per column of y.
    temp5 <-
    w.y.check(w = w, y = y,
              Is.positive.y = TRUE,
              ncol.w.max = Inf,
              ncol.y.max = Inf,
              out.wy = TRUE,
              colsyperw = 1,
              maximize = TRUE)
    w <- temp5$w
    y <- temp5$y

    ncoly <- ncol(y)
    Musual <- 2
    extra$ncoly <- ncoly
    extra$Musual <- Musual
    M <- Musual * ncoly

    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
    predictors.names <-
      c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
        namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
        interleave.VGAM(M, M = Musual)]

    if (!length(etastart)) {
      # Initial-value matrices; NA entries are filled in by the
      # grid search below.
      matH <- matrix(if (length( .ishape )) .ishape else 0 + NA,
                     n, ncoly, byrow = TRUE)
      matC <- matrix(if (length( .iscale )) .iscale else 0 + NA,
                     n, ncoly, byrow = TRUE)

      # Log-spaced candidate grids for both parameters.
      shape.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
                      exp( seq(0.1, 4, len = 07)))
      scale.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
                      exp( seq(0.1, 4, len = 07)))

      for (spp. in 1:ncoly) {
        yvec <- y[, spp.]
        wvec <- w[, spp.]

        # Weighted log-likelihood in 'scale' for a fixed shape.
        perks.Loglikfun <- function(scaleval, y, x, w, extraargs) {
          ans <-
          sum(c(w) * dperks(x = y, shape = extraargs$Shape,
                            scale = scaleval, log = TRUE))
          ans
        }

        # 2-D grid search: best scale for each candidate shape,
        # then pick the shape with the largest profile value.
        mymat <- matrix(-1, length(shape.grid), 2)
        for (jlocal in 1:length(shape.grid)) {
          mymat[jlocal, ] <-
            getMaxMin(scale.grid,
                      objfun = perks.Loglikfun,
                      y = yvec, x = x, w = wvec,
                      ret.objfun = TRUE,
                      extraargs = list(Shape = shape.grid[jlocal]))
        }
        index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]

        # Only overwrite entries the user did not supply.
        if (!length( .ishape ))
          matH[, spp.] <- shape.grid[index.shape]
        if (!length( .iscale ))
          matC[, spp.] <- mymat[index.shape, 1]
      } # spp.

      etastart <-
        cbind(theta2eta(matH, .l.shape , .e.shape ),
              theta2eta(matC, .l.scale , .e.scale ))[,
              interleave.VGAM(M, M = Musual)]
    } # End of !length(etastart)
  }), list( .l.scale = l.scale, .l.shape = l.shape,
            .e.scale = e.scale, .e.shape = e.shape,
            .ishape = ishape, .iscale = iscale
          ))),

  linkinv = eval(substitute(function(eta, extra = NULL) {
    # Fitted values are the medians.
    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )

    qperks(p = 0.5, shape = Shape, scale = Scale)
  }, list( .l.scale = l.scale, .l.shape = l.shape,
           .e.scale = e.scale, .e.shape = e.shape ))),
  last = eval(substitute(expression({
    # Record link/earg bookkeeping on the fitted object.
    misc$link <-
      c(rep( .l.shape , length = ncoly),
        rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
    names(misc$link) <- temp.names

    misc$earg <- vector("list", M)
    names(misc$earg) <- temp.names
    for(ii in 1:ncoly) {
      misc$earg[[Musual*ii-1]] <- .e.shape
      misc$earg[[Musual*ii  ]] <- .e.scale
    }

    misc$Musual <- Musual
    misc$expected <- TRUE
    misc$multipleResponses <- TRUE
    misc$nsimEIM <- .nsimEIM
  }), list( .l.scale = l.scale, .l.shape = l.shape,
            .e.scale = e.scale, .e.shape = e.shape,
            .nsimEIM = nsimEIM ))),
  loglikelihood = eval(substitute(
    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
    Shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
    Scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
    if (residuals) stop("loglikelihood residuals not ",
                        "implemented yet") else {
      sum(c(w) * dperks(x = y, shape = Shape,
                        scale = Scale, log = TRUE))
    }
  }, list( .l.scale = l.scale, .l.shape = l.shape,
           .e.scale = e.scale, .e.shape = e.shape ))),
  vfamily = c("perks"),

  deriv = eval(substitute(expression({
    # Score vector: first derivatives of the log-likelihood
    # w.r.t. shape and scale, then chain-ruled onto eta.
    Musual <- 2
    shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE],
                       .l.shape , .e.shape )
    scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE],
                       .l.scale , .e.scale )

    temp2 <- exp(y * scale)
    temp3 <- 1 + shape * temp2
    dl.dshape <- 1 / shape + 1 / (scale * (1 + shape)) -
                 (1 + 1 / scale) * temp2 / temp3
    dl.dscale <- y - log1p(shape) / scale^2 +
                 log1p(shape * temp2) / scale^2 -
                 (1 + 1 / scale) * shape * y * temp2 / temp3

    dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
    dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )

    dthetas.detas <- cbind(dshape.deta, dscale.deta)
    myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
    myderiv[, interleave.VGAM(M, M = Musual)]
  }), list( .l.scale = l.scale, .l.shape = l.shape,
            .e.scale = e.scale, .e.shape = e.shape ))),

  weight = eval(substitute(expression({
    NOS <- M / Musual
    dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]

    wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'

    ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)

    for(spp. in 1:NOS) {
      run.varcov <- 0
      Shape <- shape[, spp.]
      Scale <- scale[, spp.]

      if (FALSE && intercept.only && .oim.mean ) {
        # Dead code: averaged-OIM shortcut, disabled by the
        # FALSE && guard (and marked wrong by the author).
        stop("this is wrong")
        temp8 <- (1 + Shape * exp(Scale * y[, spp.]))^2
        nd2l.dadb <- 2 * y[, spp.] * exp(Scale * y[, spp.]) / temp8

        nd2l.dada <- 1 / Shape^2 + 1 / (1 + Shape)^2 -
                     2 * exp(2 * Scale * y[, spp.]) / temp8

        nd2l.dbdb <- 2 * Shape * y[, spp.]^2 * exp(Scale * y[, spp.]) / temp8

        ave.oim11 <- weighted.mean(nd2l.dada, w[, spp.])
        ave.oim12 <- weighted.mean(nd2l.dadb, w[, spp.])
        ave.oim22 <- weighted.mean(nd2l.dbdb, w[, spp.])
        run.varcov <- cbind(ave.oim11, ave.oim22, ave.oim12)
      } else {
        # Simulated Fisher scoring: accumulate outer products of
        # the score over .nsimEIM simulated responses.
        for(ii in 1:( .nsimEIM )) {
          ysim <- rperks(n = n, shape = Shape, scale = Scale)
          if (ii < 3) {
          }

          temp2 <- exp(ysim * Scale)
          temp3 <- 1 + Shape * temp2
          dl.dshape <- 1 / Shape + 1 / (Scale * (1 + Shape)) -
                       (1 + 1 / Scale) * temp2 / temp3
          dl.dscale <- ysim - log1p(Shape) / Scale^2 +
                       log1p(Shape * temp2) / Scale^2 -
                       (1 + 1 / Scale) * Shape * ysim * temp2 / temp3

          temp7 <- cbind(dl.dshape, dl.dscale)
          if (ii < 3) {
          }
          run.varcov <- run.varcov +
                        temp7[, ind1$row.index] *
                        temp7[, ind1$col.index]
        }
        run.varcov <- cbind(run.varcov / .nsimEIM )
      }

      # Intercept-only fits share one averaged EIM across rows.
      wz1 <- if (intercept.only)
        matrix(colMeans(run.varcov),
               nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
        run.varcov

      wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
                   dThetas.detas[, Musual * (spp. - 1) + ind1$col]

      # Copy this response's 2x2 block into the big wz matrix.
      for(jay in 1:Musual)
        for(kay in jay:Musual) {
          cptr <- iam((spp. - 1) * Musual + jay,
                      (spp. - 1) * Musual + kay,
                      M = M)
          wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
        }
    } # End of for(spp.) loop

    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
  }), list( .l.scale = l.scale,
            .e.scale = e.scale,
            .nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
} # perks()
+
+
+
+
+
+
+
+
dmakeham <- function(x, shape, scale = 1, epsilon = 0, log = FALSE) {
  # Density of the Makeham distribution on [0, Inf); log-density
  # when log = TRUE.  epsilon = 0 gives the Gompertz special case.
  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  nn <- max(length(x), length(shape), length(scale), length(epsilon))
  x       <- rep(x,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  neg.x <- (x < 0)
  # log f(x) = log(hazard) - cumulative hazard (cf. pmakeham());
  # expm1() keeps the hazard-integral term accurate.
  logdens <- log(epsilon * exp(-x * scale) + shape) +
    x * (scale - epsilon) -
    (shape / scale) * expm1(x * scale)
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0 | epsilon < 0] <- NaN
  ans
}
+
+
+
pmakeham <- function(q, shape, scale = 1, epsilon = 0) {
  # Distribution function of the Makeham distribution:
  #   F(q) = 1 - exp(-epsilon*q - (shape/scale)*(exp(scale*q) - 1)).
  nn <- max(length(q), length(shape), length(scale), length(epsilon))
  q       <- rep(q,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  cum.haz <- q * epsilon + (shape / scale) * expm1(scale * q)
  ans <- -expm1(-cum.haz)

  ans[q <= 0] <- 0
  ans[shape <= 0 | scale <= 0 | epsilon < 0] <- NaN
  ans[q == Inf] <- 1  # after the NaN assignment, as in the original
  ans
}
+
+
+
qmakeham <- function(p, shape, scale = 1, epsilon = 0) {
  # Quantile function of the Makeham distribution, in closed form
  # via the Lambert W function.  Elements with epsilon == 0 reduce
  # to the Gompertz case and are handled by qgompertz() instead
  # (the general formula divides by epsilon).
  nn <- max(length(p), length(shape), length(scale), length(epsilon))
  p       <- rep(p,       length.out = nn)
  shape   <- rep(shape,   length.out = nn)
  scale   <- rep(scale,   length.out = nn)
  epsilon <- rep(epsilon, length.out = nn)

  ans <- shape / (scale * epsilon) - log1p(-p) / epsilon -
         lambertW((shape / epsilon) * exp(shape / epsilon) *
                  (1 - p)^(-(scale / epsilon))) / scale
  zero.eps <- (epsilon == 0)
  ans[zero.eps] <-
    qgompertz(p = p[zero.eps],
              shape = shape[zero.eps],
              scale = scale[zero.eps])

  # Boundary and invalid-input handling.
  ans[p < 0]  <- NaN
  ans[p == 0] <- 0
  ans[p == 1] <- Inf
  ans[p > 1]  <- NaN
  ans[shape <= 0 | scale <= 0 | epsilon < 0] <- NaN
  ans
}
+
+
rmakeham <- function(n, shape, scale = 1, epsilon = 0) {
  # Random variates from the Makeham distribution, by inversion.
  qmakeham(p = runif(n), shape = shape, scale = scale,
           epsilon = epsilon)
}
+
+
+
+
makeham.control <- function(save.weight = TRUE, ...) {
  # Control list for makeham(); save.weight = TRUE retains the
  # working weights (the EIM is obtained by simulation).
  list(save.weight = save.weight)
}
+
+
 makeham <-
  function(lshape = "loge", lscale = "loge", lepsilon = "loge",
           ishape = NULL, iscale = NULL, iepsilon = 0.3,
           nsimEIM = 500,
           oim.mean = TRUE,
           zero = NULL)
{
  # VGAM family function for the 3-parameter Makeham distribution.
  # Each response column contributes Musual = 3 interleaved linear
  # predictors, eta = (shape, scale, epsilon).  The expected
  # information matrix is approximated by simulation (nsimEIM
  # draws) in @weight.

  lepsil <- lepsilon
  iepsil <- iepsilon

  # Convert the link specifications into (earg list, link name).
  lshape <- as.list(substitute(lshape))
  e.shape <- link2list(lshape)
  l.shape <- attr(e.shape, "function.name")

  lscale <- as.list(substitute(lscale))
  e.scale <- link2list(lscale)
  l.scale <- attr(e.scale, "function.name")

  lepsil <- as.list(substitute(lepsil))
  e.epsil <- link2list(lepsil)
  l.epsil <- attr(e.epsil, "function.name")

  if (!is.Numeric(nsimEIM, allowable.length = 1,
                  integer.valued = TRUE))
    stop("bad input for argument 'nsimEIM'")
  if (nsimEIM <= 50)
    warning("argument 'nsimEIM' should be an integer ",
            "greater than 50, say")

  # Initial values, when supplied, must be positive.
  if (length(ishape))
    if (!is.Numeric(ishape, positive = TRUE))
      stop("argument 'ishape' values must be positive")
  if (length(iscale))
    if (!is.Numeric(iscale, positive = TRUE))
      stop("argument 'iscale' values must be positive")
  if (length(iepsil))
    if (!is.Numeric(iepsil, positive = TRUE))
      stop("argument 'iepsil' values must be positive")

  if (!is.logical(oim.mean) || length(oim.mean) != 1)
    stop("bad input for argument 'oim.mean'")

  new("vglmff",
  blurb = c("Makeham distribution\n\n",
            "Links: ",
            namesof("shape", l.shape, e.shape), ", ",
            namesof("scale", l.scale, e.scale), ", ",
            namesof("epsilon", l.epsil, e.epsil), "\n",
            "Median: qmakeham(p = 0.5, shape, scale, epsilon)"),

  # Constraint matrices: 'zero' marks intercept-only predictors.
  constraints = eval(substitute(expression({
    dotzero <- .zero
    Musual <- 3
    eval(negzero.expression)
  }), list( .zero = zero ))),

  infos = eval(substitute(function(...) {
    list(Musual = 3,
         nsimEIM = .nsimEIM,
         zero = .zero )
  }, list( .zero = zero,
           .nsimEIM = nsimEIM ))),

  initialize = eval(substitute(expression({
    # Standardize w and y (multiple positive responses allowed).
    temp5 <-
    w.y.check(w = w, y = y,
              Is.positive.y = TRUE,
              ncol.w.max = Inf,
              ncol.y.max = Inf,
              out.wy = TRUE,
              colsyperw = 1,
              maximize = TRUE)
    w <- temp5$w
    y <- temp5$y

    ncoly <- ncol(y)

    Musual <- 3
    extra$ncoly <- ncoly
    extra$Musual <- Musual
    M <- Musual * ncoly

    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
    mynames3 <- paste("epsilon", if (ncoly > 1) 1:ncoly else "", sep = "")
    predictors.names <-
      c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
        namesof(mynames2, .l.scale , .e.scale , tag = FALSE),
        namesof(mynames3, .l.epsil , .e.epsil , tag = FALSE))[
        interleave.VGAM(M, M = Musual)]

    if (!length(etastart)) {
      # Initial-value matrices; NAs filled by grid search below.
      matC <- matrix(if (length( .iscale )) .iscale else 0 + NA,
                     n, ncoly, byrow = TRUE)
      matH <- matrix(if (length( .ishape )) .ishape else 0 + NA,
                     n, ncoly, byrow = TRUE)

      # Epsilon starts at the user value, else 0.3.
      matE <- matrix(if (length( .iepsil )) .iepsil else 0.3,
                     n, ncoly, byrow = TRUE)

      shape.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
                      exp( seq(0.1, 4, len = 05)))
      scale.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
                      exp( seq(0.1, 4, len = 05)))

      # Stage 1: grid search over (shape, scale) with epsilon
      # held fixed at its initial value.
      for (spp. in 1:ncoly) {
        yvec <- y[, spp.]
        wvec <- w[, spp.]

        makeham.Loglikfun <- function(scaleval, y, x, w, extraargs) {
          ans <-
          sum(c(w) * dmakeham(x = y, shape = extraargs$Shape,
                              epsilon = extraargs$Epsil,
                              scale = scaleval, log = TRUE))
          ans
        }

        mymat <- matrix(-1, length(shape.grid), 2)
        for (jlocal in 1:length(shape.grid)) {
          mymat[jlocal, ] <-
            getMaxMin(scale.grid,
                      objfun = makeham.Loglikfun,
                      y = yvec, x = x, w = wvec,
                      ret.objfun = TRUE,
                      extraargs = list(Shape = shape.grid[jlocal],
                                       Epsil = matE[1, spp.]))
        }
        index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]

        if (!length( .ishape ))
          matH[, spp.] <- shape.grid[index.shape]
        if (!length( .iscale ))
          matC[, spp.] <- mymat[index.shape, 1]
      } # spp.

      # Stage 2: grid search over epsilon with (shape, scale)
      # fixed at the Stage-1 values.
      epsil.grid <- c(exp(-seq(4, 0.1, len = 05)), 1,
                      exp( seq(0.1, 1, len = 05)))
      for (spp. in 1:ncoly) {
        yvec <- y[, spp.]
        wvec <- w[, spp.]

        makeham.Loglikfun2 <- function(epsilval, y, x, w, extraargs) {
          ans <-
          sum(c(w) * dmakeham(x = y, shape = extraargs$Shape,
                              epsilon = epsilval,
                              scale = extraargs$Scale, log = TRUE))
          ans
        }
        Init.epsil <-
          getMaxMin(epsil.grid,
                    objfun = makeham.Loglikfun2,
                    y = yvec, x = x, w = wvec,
                    extraargs = list(Shape = matH[1, spp.],
                                     Scale = matC[1, spp.]))

        matE[, spp.] <- Init.epsil
      } # spp.

      etastart <- cbind(theta2eta(matH, .l.shape , .e.shape ),
                        theta2eta(matC, .l.scale , .e.scale ),
                        theta2eta(matE, .l.epsil , .e.epsil ))[,
                        interleave.VGAM(M, M = Musual)]
    } # End of !length(etastart)
  }), list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
    .ishape = ishape, .iscale = iscale, .iepsil = iepsil
  ))),

  linkinv = eval(substitute(function(eta, extra = NULL) {
    # Fitted values are the medians.
    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .l.shape , .e.shape )
    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .l.scale , .e.scale )
    epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .l.epsil , .e.epsil )
    # NOTE(review): 'epsil =' relies on partial matching of
    # qmakeham()'s 'epsilon' argument.
    qmakeham(p = 0.5, shape = shape, scale = scale, epsil = epsil)
  }, list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
  ))),
  last = eval(substitute(expression({
    # Record link/earg bookkeeping on the fitted object.
    Musual <- extra$Musual
    misc$link <-
      c(rep( .l.shape , length = ncoly),
        rep( .l.scale , length = ncoly),
        rep( .l.epsil , length = ncoly))[interleave.VGAM(M, M = Musual)]
    temp.names <- c(mynames1, mynames2, mynames3)[
                  interleave.VGAM(M, M = Musual)]
    names(misc$link) <- temp.names

    misc$earg <- vector("list", M)
    names(misc$earg) <- temp.names
    for(ii in 1:ncoly) {
      misc$earg[[Musual*ii-2]] <- .e.shape
      misc$earg[[Musual*ii-1]] <- .e.scale
      misc$earg[[Musual*ii  ]] <- .e.epsil
    }

    misc$Musual <- Musual
    misc$expected <- TRUE
    misc$multipleResponses <- TRUE
    misc$nsimEIM <- .nsimEIM
  }), list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
    .nsimEIM = nsimEIM ))),
  loglikelihood = eval(substitute(
    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE)], .l.shape , .e.shape )
    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE)], .l.scale , .e.scale )
    epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE)], .l.epsil , .e.epsil )
    if (residuals) stop("loglikelihood residuals not ",
                        "implemented yet") else {
      # 'epsil =' partially matches dmakeham()'s 'epsilon'.
      sum(c(w) * dmakeham(x = y, shape = shape, scale = scale,
                          epsil = epsil, log = TRUE))
    }
  }, list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
  ))),
  vfamily = c("makeham"),

  deriv = eval(substitute(expression({
    # Score vector w.r.t. (shape, scale, epsilon), chain-ruled
    # onto the linear predictors.
    Musual <- 3
    shape <- eta2theta(eta[, c(TRUE, FALSE, FALSE), drop = FALSE],
                       .l.shape , .e.shape )
    scale <- eta2theta(eta[, c(FALSE, TRUE, FALSE), drop = FALSE],
                       .l.scale , .e.scale )
    epsil <- eta2theta(eta[, c(FALSE, FALSE, TRUE), drop = FALSE],
                       .l.epsil , .e.epsil )

    temp2 <- exp(y * scale)
    temp3 <- epsil + shape * temp2
    dl.dshape <- temp2 / temp3 - expm1(y * scale) / scale
    dl.dscale <- shape * y * temp2 / temp3 +
                 shape * expm1(y * scale) / scale^2 -
                 shape * y * temp2 / scale

    dl.depsil <- 1 / temp3 - y

    dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
    dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )
    depsil.deta <- dtheta.deta(epsil, .l.epsil , .e.epsil )

    dthetas.detas <- cbind(dshape.deta, dscale.deta, depsil.deta)
    myderiv <- c(w) * cbind(dl.dshape,
                            dl.dscale,
                            dl.depsil) * dthetas.detas
    myderiv[, interleave.VGAM(M, M = Musual)]
  }), list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil
  ))),

  weight = eval(substitute(expression({
    NOS <- M / Musual
    dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]

    wz <- matrix(0.0, n, M + M - 1 + M - 2) # wz has half-bw 3

    ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)

    for(spp. in 1:NOS) {
      run.varcov <- 0
      Shape <- shape[, spp.]
      Scale <- scale[, spp.]
      Epsil <- epsil[, spp.]

      if (FALSE && intercept.only && .oim.mean ) {
        # Dead code: averaged-OIM shortcut, disabled by FALSE &&.
        temp8 <- (1 + Shape * exp(Scale * y[, spp.]))^2
        nd2l.dadb <- 2 * y[, spp.] * exp(Scale * y[, spp.]) / temp8

        nd2l.dada <- 1 / Shape^2 + 1 / (1 + Shape)^2 -
                     2 * exp(2 * Scale * y[, spp.]) / temp8

        nd2l.dbdb <- 2 * Shape * y[, spp.]^2 * exp(Scale * y[, spp.]) / temp8

        ave.oim11 <- weighted.mean(nd2l.dada, w[, spp.])
        ave.oim12 <- weighted.mean(nd2l.dadb, w[, spp.])
        ave.oim22 <- weighted.mean(nd2l.dbdb, w[, spp.])
        run.varcov <- cbind(ave.oim11, ave.oim22, ave.oim12)
      } else {
        # Simulated EIM: accumulate outer products of the score
        # over .nsimEIM simulated responses.
        for(ii in 1:( .nsimEIM )) {
          ysim <- rmakeham(n = n, shape = Shape, scale = Scale,
                           epsil = Epsil)
          if (ii < 3) {
          }

          temp2 <- exp(ysim * Scale)
          temp3 <- Epsil + Shape * temp2
          if (!is.Numeric(temp2))
            stop("temp2 is not Numeric")
          if (!is.Numeric(temp3))
            stop("temp3 is not Numeric")
          dl.dshape <- temp2 / temp3 - expm1(ysim * Scale) / Scale
          dl.dscale <- Shape * ysim * temp2 / temp3 +
                       Shape * expm1(ysim * Scale) / Scale^2 -
                       Shape * ysim * temp2 / Scale
          dl.depsil <- 1 / temp3 - ysim

          temp7 <- cbind(dl.dshape, dl.dscale, dl.depsil)
          if (ii < 3) {
          }
          run.varcov <- run.varcov +
                        temp7[, ind1$row.index] *
                        temp7[, ind1$col.index]
        }
        run.varcov <- cbind(run.varcov / .nsimEIM )
      }

      # Replace non-finite entries by the column mean of the
      # finite ones.
      for (ilocal in 1:ncol(run.varcov)) {
        indexInf <- is.finite(run.varcov[, ilocal])
        run.varcov[!indexInf, ilocal] <-
          mean(run.varcov[indexInf, ilocal])
      }

      # Intercept-only fits share one averaged EIM across rows.
      wz1 <- if (intercept.only)
        matrix(colMeans(run.varcov, na.rm = TRUE),
               nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
        run.varcov

      wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
                   dThetas.detas[, Musual * (spp. - 1) + ind1$col]

      # Copy this response's 3x3 block into the big wz matrix.
      for(jay in 1:Musual)
        for(kay in jay:Musual) {
          cptr <- iam((spp. - 1) * Musual + jay,
                      (spp. - 1) * Musual + kay,
                      M = M)
          wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
        }
    } # End of for(spp.) loop

    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
  }), list(
    .l.shape = l.shape, .l.scale = l.scale, .l.epsil = l.epsil,
    .e.shape = e.shape, .e.scale = e.scale, .e.epsil = e.epsil,
    .nsimEIM = nsimEIM, .oim.mean = oim.mean ))))
} # makeham()
+
+
+
+
+
+
+
+
dgompertz <- function(x, shape, scale = 1, log = FALSE) {
  # Density of the Gompertz distribution on [0, Inf); log-density
  # when log = TRUE.
  if (!is.logical(give.log <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)  # so base::log() is usable below

  nn <- max(length(x), length(shape), length(scale))
  x     <- rep(x,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  neg.x <- (x < 0)
  small <- abs(x * scale) < 0.1 & is.finite(x * scale)

  # log f(x) = log(shape) + scale*x - (shape/scale)*(exp(scale*x)-1);
  # for |scale*x| < 0.1 recompute via expm1() for accuracy.
  logdens <- log(shape) + x * scale -
    (shape / scale) * (exp(x * scale) - 1)
  logdens[small] <- log(shape[small]) + x[small] * scale[small] -
    (shape[small] / scale[small]) * expm1(x[small] * scale[small])
  logdens[neg.x]    <- log(0)
  logdens[x == Inf] <- log(0)

  ans <- if (give.log) logdens else exp(logdens)
  if (!give.log) {
    ans[neg.x]    <- 0
    ans[x == Inf] <- 0
  }
  ans[shape <= 0 | scale <= 0] <- NaN
  ans
}
+
+
+
pgompertz <- function(q, shape, scale = 1) {
  # Distribution function of the Gompertz distribution:
  #   F(q) = 1 - exp(-(shape/scale) * (exp(scale*q) - 1)).
  nn <- max(length(q), length(shape), length(scale))
  q     <- rep(q,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  ans <- -expm1((-shape / scale) * expm1(scale * q))
  ans[q <= 0] <- 0
  ans[shape <= 0 | scale <= 0] <- NaN
  ans[q == Inf] <- 1  # after the NaN assignment, as in the original
  ans
}
+
+
qgompertz <- function(p, shape, scale = 1) {
  # Quantile function of the Gompertz distribution (closed-form
  # inverse of pgompertz()).
  nn <- max(length(p), length(shape), length(scale))
  p     <- rep(p,     length.out = nn)
  shape <- rep(shape, length.out = nn)
  scale <- rep(scale, length.out = nn)

  ans <- log1p((-scale / shape) * log1p(-p)) / scale

  # Boundary and invalid-input handling.
  ans[p < 0]  <- NaN
  ans[p == 0] <- 0
  ans[p == 1] <- Inf
  ans[p > 1]  <- NaN
  ans[shape <= 0 | scale <= 0] <- NaN
  ans
}
+
+
rgompertz <- function(n, shape, scale = 1) {
  # Random variates from the Gompertz distribution, by inversion.
  qgompertz(p = runif(n), shape = shape, scale = scale)
}
+
+
+
+
+
+
+
gompertz.control <- function(save.weight = TRUE, ...) {
  # Control list for gompertz(); save.weight = TRUE retains the
  # working weights (the EIM is obtained by simulation, nsimEIM).
  list(save.weight = save.weight)
}
+
+
# ---------------------------------------------------------------------------
# gompertz(): VGAM family function for the two-parameter Gompertz
# distribution (shape, scale), supporting multiple responses.
# Musual = 2 linear predictors per response: eta1 = lshape(shape),
# eta2 = lscale(scale).  The expected information matrix is approximated
# by simulation ('nsimEIM' draws per IRLS step), hence gompertz.control()
# sets save.weight = TRUE.
#
# Arguments:
#   lshape, lscale : link functions for shape and scale.
#   ishape, iscale : optional positive initial values (recycled per column).
#   nsimEIM        : number of simulated samples for the EIM (> 50 advised).
#   zero           : linear predictors to model as intercept-only.
# ---------------------------------------------------------------------------
+ gompertz <-
+  function(lshape = "loge", lscale = "loge",
+           ishape = NULL, iscale = NULL,
+           nsimEIM = 500,
+           zero = NULL)
+{
+
+
+
# Convert the user-supplied link specifications into an extra-argument
# list plus the canonical link name.
+  lshape <- as.list(substitute(lshape))
+  e.shape <- link2list(lshape)
+  l.shape <- attr(e.shape, "function.name")
+
+  lscale <- as.list(substitute(lscale))
+  e.scale <- link2list(lscale)
+  l.scale <- attr(e.scale, "function.name")
+
+
+
# Basic argument validation.
+  if (!is.Numeric(nsimEIM, allowable.length = 1,
+                  integer.valued = TRUE))
+    stop("bad input for argument 'nsimEIM'")
+  if (nsimEIM <= 50)
+    warning("argument 'nsimEIM' should be an integer ",
+            "greater than 50, say")
+
+
+  if (length(ishape))
+    if (!is.Numeric(ishape, positive = TRUE))
+      stop("argument 'ishape' values must be positive")
+  if (length(iscale))
+    if (!is.Numeric(iscale, positive = TRUE))
+      stop("argument 'iscale' values must be positive")
+
+
+
+
+
# The family object; the slots below are evaluated by vglm()/vgam().
# NOTE(review): the blurb's median formula "scale * log(2 - 1 / shape)"
# differs from the linkinv slot below, which computes
# log1p((scale/shape) * log(2)) / scale (the inverse CDF at 0.5) --
# TODO confirm the blurb text.
+  new("vglmff",
+  blurb = c("Gompertz distribution\n\n",
+            "Links:    ",
+            namesof("shape", l.shape, e.shape ), ", ",
+            namesof("scale", l.scale, e.scale ), "\n",
+            "Median:     scale * log(2 - 1 / shape)"),
+
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         nsimEIM = .nsimEIM,
+         zero = .zero )
+  }, list( .zero = zero,
+           .nsimEIM = nsimEIM ))),
+  initialize = eval(substitute(expression({
+
# Validate/reshape the response and prior weights (response must be > 0).
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    ncoly <- ncol(y)
+    Musual <- 2
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    M <- Musual * ncoly
+
+
+    mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .l.shape , .e.shape , tag = FALSE),
+          namesof(mynames2, .l.scale , .e.scale , tag = FALSE))[
+          interleave.VGAM(M, M = Musual)]
+
+
+
+    if (!length(etastart)) {
+
# Initial-value matrices; NA entries are filled by the grid search below.
+      matH <- matrix(if (length( .ishape )) .ishape else 0 + NA,
+                     n, ncoly, byrow = TRUE)
+      matC <- matrix(if (length( .iscale )) .iscale else 0 + NA,
+                     n, ncoly, byrow = TRUE)
+
# Log-spaced candidate grids on (exp(-4), exp(4)) for both parameters.
+      shape.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
+                      exp( seq(0.1, 4, len = 07)))
+      scale.grid <- c(exp(-seq(4, 0.1, len = 07)), 1,
+                      exp( seq(0.1, 4, len = 07)))
+
+      for (spp. in 1:ncoly) {
+        yvec <- y[, spp.]
+        wvec <- w[, spp.]
+
+
# Weighted log-likelihood in 'scale' for a fixed shape (extraargs$Shape).
+        gompertz.Loglikfun <- function(scaleval, y, x, w, extraargs) {
+          ans <-
+          sum(c(w) * dgompertz(x = y, shape = extraargs$Shape,
+                               scale = scaleval, log = TRUE))
+          ans
+        }
+
# For each candidate shape, find the best scale and its log-likelihood:
# column 1 = best scale, column 2 = attained log-likelihood.
+        mymat <- matrix(-1, length(shape.grid), 2)
+        for (jlocal in 1:length(shape.grid)) {
+          mymat[jlocal, ] <-
+            getMaxMin(scale.grid,
+                      objfun = gompertz.Loglikfun,
+                      y = yvec, x = x, w = wvec,
+                      ret.objfun = TRUE,
+                      extraargs = list(Shape = shape.grid[jlocal]))
+        }
+        index.shape <- which(mymat[, 2] == max(mymat[, 2]))[1]
+
# User-supplied initial values take precedence over the grid search.
+        if (!length( .ishape ))
+          matH[, spp.] <- shape.grid[index.shape]
+        if (!length( .iscale ))
+          matC[, spp.] <- mymat[index.shape, 1]
+      } # spp.
+
# Map initial parameter values to the eta scale, interleaving the
# (shape, scale) pairs per response.
+      etastart <- cbind(theta2eta(matH, .l.shape , .e.shape ),
+                        theta2eta(matC, .l.scale , .e.scale ))[,
+                        interleave.VGAM(M, M = Musual)]
+    } # End of !length(etastart)
+  }), list( .l.shape = l.shape, .l.scale = l.scale,
+            .e.shape = e.shape, .e.scale = e.scale,
+            .ishape = ishape, .iscale = iscale
+          ))),
+
# Fitted values: the median, i.e. the inverse CDF at p = 0.5.
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
+    scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+    log1p((scale / shape) * log(2)) / scale
+  }, list( .l.shape = l.shape, .l.scale = l.scale,
+           .e.shape = e.shape, .e.scale = e.scale ))),
+  last = eval(substitute(expression({
# Record links/extra-args per predictor for summary()/predict() methods.
+    Musual <- extra$Musual
+    misc$link <-
+      c(rep( .l.shape , length = ncoly),
+        rep( .l.scale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for(ii in 1:ncoly) {
+      misc$earg[[Musual*ii-1]] <- .e.shape
+      misc$earg[[Musual*ii  ]] <- .e.scale
+    }
+
+    misc$Musual <- Musual
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+    misc$nsimEIM <- .nsimEIM
+  }), list( .l.shape = l.shape, .l.scale = l.scale,
+            .e.shape = e.shape, .e.scale = e.scale,
+            .nsimEIM = nsimEIM ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    shape <- eta2theta(eta[, c(TRUE, FALSE)], .l.shape , .e.shape )
+    scale <- eta2theta(eta[, c(FALSE, TRUE)], .l.scale , .e.scale )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
+      sum(c(w) * dgompertz(x = y, shape = shape,
+                           scale = scale, log = TRUE))
+    }
+  }, list( .l.shape = l.shape, .l.scale = l.scale,
+           .e.shape = e.shape, .e.scale = e.scale ))),
+  vfamily = c("gompertz"),
+
# Score vector: with temp4 = -expm1(y*scale),
#   dl/dshape = 1/shape - expm1(y*scale)/scale
#   dl/dscale = y*(1 - shape*exp(y*scale)/scale) + shape*expm1(y*scale)/scale^2
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    shape <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .l.shape ,
+                       .e.shape )
+    scale <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .l.scale ,
+                       .e.scale )
+
+
+    temp2 <- exp(y * scale)
+    temp4 <- -expm1(y * scale)
+    dl.dshape <- 1 / shape + temp4 / scale
+    dl.dscale <- y * (1 - shape * temp2 / scale) -
+                 shape * temp4 / scale^2
+
+    dshape.deta <- dtheta.deta(shape, .l.shape , .e.shape )
+    dscale.deta <- dtheta.deta(scale, .l.scale , .e.scale )
+
+    dthetas.detas <- cbind(dshape.deta, dscale.deta)
+    myderiv <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
+    myderiv[, interleave.VGAM(M, M = Musual)]
+  }), list( .l.shape = l.shape, .l.scale = l.scale,
+            .e.shape = e.shape, .e.scale = e.scale ))),
+
+
# Working weights: simulated expected information, one 2x2 block per
# response, stored in tridiagonal form.
+  weight = eval(substitute(expression({
+
+    NOS <- M / Musual
+    dThetas.detas <- dthetas.detas[, interleave.VGAM(M, M = Musual)]
+
+    wz <- matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
+
+    ind1 <- iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+
+
+    for(spp. in 1:NOS) {
+      run.varcov <- 0
+      Shape <- shape[, spp.]
+      Scale <- scale[, spp.]
+
# NOTE(review): the simulated scores below use the full matrices
# 'shape'/'scale' with the per-response vector 'ysim', although the
# per-column 'Shape'/'Scale' were just extracted above -- presumably
# those were intended when ncoly > 1; TODO confirm against upstream.
+      for(ii in 1:( .nsimEIM )) {
+        ysim <- rgompertz(n = n, shape = Shape, scale = Scale)
# (Empty placeholder block retained from upstream; it has no effect.)
+if (ii < 3) {
+}
+
+        temp2 <- exp(ysim * scale)
+        temp4 <- -expm1(ysim * scale)
+        dl.dshape <- 1 / shape + temp4 / scale
+        dl.dscale <- ysim * (1 - shape * temp2 / scale) -
+                     shape * temp4 / scale^2
+
+
+        temp7 <- cbind(dl.dshape, dl.dscale)
+        run.varcov <- run.varcov +
+                      temp7[, ind1$row.index] *
+                      temp7[, ind1$col.index]
+      }
+      run.varcov <- cbind(run.varcov / .nsimEIM )
+
# For intercept-only fits, average the simulated EIM over observations.
+      wz1 <- if (intercept.only)
+          matrix(colMeans(run.varcov),
+                 nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
+          run.varcov
+
# Chain rule: convert from theta- to eta-scale.
+      wz1 <- wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+                   dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+
+
# Scatter this response's 2x2 block into the big working-weight matrix.
+      for(jay in 1:Musual)
+        for(kay in jay:Musual) {
+          cptr <- iam((spp. - 1) * Musual + jay,
+                      (spp. - 1) * Musual + kay,
+                      M = M)
+          wz[, cptr] <- wz1[, iam(jay, kay, M = Musual)]
+        }
+    } # End of for(spp.) loop
+
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+  }), list( .l.scale = l.scale,
+            .e.scale = e.scale,
+            .nsimEIM = nsimEIM ))))
+} # gompertz()
+
+
+
+
+
+
dmoe <- function(x, alpha = 1, lambda = 1, log = FALSE) {
  # Density of the Marshall-Olkin exponential variant used by pmoe()/qmoe():
  #   f(x) = lambda * exp(lambda*x) / (expm1(lambda*x) + alpha)^2
  # on the support x >= log(2 - alpha) / lambda, which is where pmoe()
  # reaches 0 and qmoe(0) starts.  Invalid parameters give NaN.
  if (!is.logical(log.arg <- log) || length(log) != 1)
    stop("bad input for argument 'log'")
  rm(log)

  LLL <- max(length(x), length(alpha), length(lambda))
  if (length(x)      != LLL) x      <- rep(x,      length.out = LLL)
  if (length(alpha)  != LLL) alpha  <- rep(alpha,  length.out = LLL)
  if (length(lambda) != LLL) lambda <- rep(lambda, length.out = LLL)

  # Fix: zero the density below the support lower bound log(2-alpha)/lambda
  # (consistent with pmoe()/qmoe()), not merely below 0.  pmax() keeps the
  # logical subscript free of NaN when alpha >= 2 (bound is then -Inf, so
  # nothing is zeroed and the support is the whole real line).
  index0 <- (x < log(pmax(2 - alpha, 0)) / lambda)
  if (log.arg) {
    ans <- log(lambda) + (lambda * x) -
           2 * log(expm1(lambda * x) + alpha)
    ans[index0] <- log(0)  # -Inf on the log scale
  } else {
    ans <- lambda * exp(lambda * x) / (expm1(lambda * x) + alpha)^2
    ans[index0] <- 0
  }
  ans[alpha <= 0 | lambda <= 0] <- NaN
  ans
}
+
+
+
pmoe <- function(q, alpha = 1, lambda = 1) {
  # Distribution function matching dmoe()/qmoe():
  #   F(q) = 1 - 1 / (expm1(lambda*q) + alpha),
  # with F = 0 below the support lower bound log(2 - alpha) / lambda.
  # Invalid parameters (alpha <= 0 or lambda <= 0) give NaN.
  ret <- ifelse(alpha <= 0 | lambda <= 0, NaN,
                1 - 1 / (expm1(lambda * q) + alpha))
  # Fix: for alpha > 2, log(2 - alpha) is NaN and the original logical
  # subscript contained NA, which is an error in subassignment.  pmax()
  # maps alpha >= 2 to a -Inf bound, so nothing is zeroed there.
  ret[q < log(pmax(2 - alpha, 0)) / lambda] <- 0
  ret
}
+
+
+
qmoe <- function(p, alpha = 1, lambda = 1) {
  # Quantile function: inverts pmoe(), giving
  #   q = log1p(-alpha + 1/(1 - p)) / lambda.
  # Probabilities outside [0, 1] and non-positive parameters give NaN.
  invalid <- (p < 0) | (p > 1) | (alpha <= 0) | (lambda <= 0)
  ifelse(invalid, NaN, log1p(-alpha + 1 / (1 - p)) / lambda)
}
+
+
+
rmoe <- function(n, alpha = 1, lambda = 1) {
  # Random variates via the probability-integral transform:
  # feed Uniform(0, 1) draws through the quantile function.
  qmoe(p = runif(n), alpha = alpha, lambda = lambda)
}
+
+
+
+
exponential.mo.control <- function(save.weight = TRUE, ...) {
  # Control list for exponential.mo(); save.weight defaults to TRUE.
  list(save.weight = save.weight)
}
+
+
+
+
# ---------------------------------------------------------------------------
# exponential.mo(): VGAM family function for a Marshall-Olkin exponential
# distribution (alpha, lambda), built on dmoe()/rmoe().
#
# IMPORTANT: the function stop()s unconditionally at entry -- it is
# deliberately disabled because the support of the density depends on the
# parameters.  Everything after the stop() is unreachable and retained
# for reference only.
#
# NOTE(review): the defaults 'ealpha = list(), elambda = list()' are dead:
# both are immediately overwritten by link2list() below.
# ---------------------------------------------------------------------------
+ exponential.mo <-
+  function(lalpha = "loge",  llambda = "loge",
+           ealpha = list(),  elambda = list(),
+           ialpha = 1,       ilambda = NULL,
+           imethod = 1,
+           nsimEIM = 200,
+           zero = NULL)
+{
+
+  stop("fundamentally unable to estimate the parameters as ",
+       "the support of the density depends on the parameters")
+
+
# ---- Unreachable from here on (see stop() above). ----
# Convert link specifications into extra-argument lists and link names.
+  lalpha <- as.list(substitute(lalpha))
+  ealpha <- link2list(lalpha)
+  lalpha <- attr(ealpha, "function.name")
+
+  llambda <- as.list(substitute(llambda))
+  elambda <- link2list(llambda)
+  llambda <- attr(elambda, "function.name")
+
+  lalpha0 <- lalpha
+  ealpha0 <- ealpha
+  ialpha0 <- ialpha
+
+
+
# Argument validation.
+  if (!is.Numeric(nsimEIM, allowable.length = 1,
+                  integer.valued = TRUE))
+    stop("bad input for argument 'nsimEIM'")
+  if (nsimEIM <= 50)
+    warning("argument 'nsimEIM' should be an integer ",
+            "greater than 50, say")
+
+  if (length(ialpha0))
+    if (!is.Numeric(ialpha0, positive = TRUE))
+      stop("argument 'ialpha' values must be positive")
+  if (length(ilambda))
+    if (!is.Numeric(ilambda, positive = TRUE))
+      stop("argument 'ilambda' values must be positive")
+
+
+  if (!is.Numeric(imethod, allowable.length = 1,
+                  integer.valued = TRUE, positive = TRUE) ||
+     imethod > 2)
+    stop("argument 'imethod' must be 1 or 2")
+
+
+
# The family object.  Median blurb matches qmoe(0.5) = log(3-alpha)/lambda.
+  new("vglmff",
+  blurb = c("Marshall-Olkin exponential distribution\n\n",
+            "Links:    ",
+            namesof("alpha", lalpha0, ealpha0 ), ", ",
+            namesof("lambda", llambda, elambda ), "\n",
+            "Median:     log(3 - alpha) / lambda"),
+
+  constraints = eval(substitute(expression({
+    dotzero <- .zero
+    Musual <- 2
+    eval(negzero.expression)
+  }), list( .zero = zero ))),
+
+  infos = eval(substitute(function(...) {
+    list(Musual = 2,
+         nsimEIM = .nsimEIM,
+         zero = .zero )
+  }, list( .zero = zero,
+           .nsimEIM = nsimEIM ))),
+  initialize = eval(substitute(expression({
+
# Validate/reshape the response and prior weights (response must be > 0).
+    temp5 <-
+    w.y.check(w = w, y = y,
+              Is.positive.y = TRUE,
+              ncol.w.max = Inf,
+              ncol.y.max = Inf,
+              out.wy = TRUE,
+              colsyperw = 1,
+              maximize = TRUE)
+    w <- temp5$w
+    y <- temp5$y
+
+
+
+    ncoly <- ncol(y)
+
+    Musual <- 2
+    extra$ncoly <- ncoly
+    extra$Musual <- Musual
+    M <- Musual * ncoly
+
+
+    mynames1 <- paste("alpha",  if (ncoly > 1) 1:ncoly else "", sep = "")
+    mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+    predictors.names <-
+        c(namesof(mynames1, .lalpha0 , .ealpha0 , tag = FALSE),
+          namesof(mynames2, .llambda , .elambda , tag = FALSE))[
+          interleave.VGAM(M, M = Musual)]
+
+
+
+    if (!length(etastart)) {
+
# Initial-value matrices (filled with 0 when no initial value is given,
# unlike gompertz() which uses NA).
+      matL <- matrix(if (length( .ilambda )) .ilambda else 0,
+                     n, ncoly, byrow = TRUE)
+      matA <- matrix(if (length( .ialpha0 )) .ialpha0 else 0,
+                     n, ncoly, byrow = TRUE)
+
+
+      for (spp. in 1:ncoly) {
+        yvec <- y[, spp.]
+
# Weighted log-likelihood in 'lambda' for a fixed alpha.
+        moexpon.Loglikfun <- function(lambdaval, y, x, w, extraargs) {
+          ans <-
+          sum(c(w) * log(dmoe(x = y, alpha = extraargs$alpha,
+                              lambda = lambdaval)))
+          ans
+        }
+        Alpha.init <- .ialpha0
+        lambda.grid <- seq(0.1, 10.0, len = 21)
+        Lambda.init <- getMaxMin(lambda.grid,
+                                 objfun = moexpon.Loglikfun,
+                                 y = y, x = x, w = w,
+                                 extraargs = list(alpha = Alpha.init))
+
# NOTE(review): 'Phimat.init' below is not defined anywhere in this
# scope, and 'Alpha0.init' further down is likewise undefined (only
# 'Alpha.init' exists).  These would error if this code were reachable.
+        if (length(mustart)) {
+          Lambda.init <- Lambda.init / (1 - Phimat.init)
+        }
+
+        if (!length( .ialpha0 ))
+          matA[, spp.] <- Alpha0.init
+        if (!length( .ilambda ))
+          matL[, spp.] <- Lambda.init
+      } # spp.
+
+      etastart <- cbind(theta2eta(matA, .lalpha0, .ealpha0 ),
+                        theta2eta(matL, .llambda, .elambda ))[,
+                        interleave.VGAM(M, M = Musual)]
+      mustart <- NULL # Since etastart has been computed.
+    } # End of !length(etastart)
+  }), list( .lalpha0 = lalpha0, .llambda = llambda,
+            .ealpha0 = ealpha0, .elambda = elambda,
+            .ialpha0 = ialpha0, .ilambda = ilambda,
+            .imethod = imethod
+          ))),
+
# Fitted values: the median, log(3 - alpha) / lambda (= qmoe(0.5)).
+  linkinv = eval(substitute(function(eta, extra = NULL) {
+    alpha0 = eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
+    lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
+    log(3 - alpha0) / lambda
+  }, list( .lalpha0 = lalpha0, .llambda = llambda,
+           .ealpha0 = ealpha0, .elambda = elambda ))),
+  last = eval(substitute(expression({
# Record links/extra-args per predictor for summary()/predict() methods.
+    Musual <- extra$Musual
+    misc$link <-
+      c(rep( .lalpha0 , length = ncoly),
+        rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
+    temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+    names(misc$link) <- temp.names
+
+    misc$earg <- vector("list", M)
+    names(misc$earg) <- temp.names
+    for(ii in 1:ncoly) {
+      misc$earg[[Musual*ii-1]] <- .ealpha0
+      misc$earg[[Musual*ii  ]] <- .elambda
+    }
+
+    misc$Musual <- Musual
+    misc$imethod <- .imethod
+    misc$expected <- TRUE
+    misc$multipleResponses <- TRUE
+    misc$nsimEIM = .nsimEIM
+  }), list( .lalpha0 = lalpha0, .llambda = llambda,
+            .ealpha0 = ealpha0, .elambda = elambda,
+            .nsimEIM = nsimEIM,
+            .imethod = imethod ))),
+  loglikelihood = eval(substitute(
+    function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+    alpha0 = eta2theta(eta[, c(TRUE, FALSE)], .lalpha0 , .ealpha0 )
+    lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , .elambda )
+    if (residuals) stop("loglikelihood residuals not ",
+                        "implemented yet") else {
# (log(dmoe(...)) is used here rather than a log = TRUE argument.)
+      sum(c(w) * log(dmoe(x = y, alpha = alpha0,
+                          lambda = lambda)))
+    }
+  }, list( .lalpha0 = lalpha0, .llambda = llambda,
+           .ealpha0 = ealpha0, .elambda = elambda ))),
+  vfamily = c("exponential.mo"),
+
# Score vector: with temp2 = expm1(lambda*y) + alpha0,
#   dl/dalpha0 = -2 / temp2
#   dl/dlambda = 1/lambda + y - 2*y*exp(lambda*y)/temp2
+  deriv = eval(substitute(expression({
+    Musual <- 2
+    alpha0 = eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lalpha0 ,
+                       .ealpha0 )
+    lambda = eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+                       .elambda )
+
+    temp2 = (expm1(lambda * y) + alpha0)
+    dl.dalpha0 = -2 / temp2
+    dl.dlambda = 1 / lambda + y - 2 * y * exp(lambda * y) / temp2
+
+    dalpha0.deta = dtheta.deta(alpha0, .lalpha0 , .ealpha0 )
+    dlambda.deta = dtheta.deta(lambda, .llambda , .elambda )
+
+    dthetas.detas = cbind(dalpha0.deta,
+                          dlambda.deta)
+    myderiv = c(w) * cbind(dl.dalpha0, dl.dlambda) * dthetas.detas
+    myderiv[, interleave.VGAM(M, M = Musual)]
+  }), list( .lalpha0 = lalpha0, .llambda = llambda,
+            .ealpha0 = ealpha0, .elambda = elambda ))),
+
+
# Working weights: simulated expected information, one 2x2 block per
# response, stored in tridiagonal form (same scheme as gompertz()).
+  weight = eval(substitute(expression({
+
+    NOS = M / Musual
+    dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
+
+    wz = matrix(0.0, n, M + M - 1)  # wz is 'tridiagonal'
+
+    ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
+
+
+    for(spp. in 1:NOS) {
+      run.varcov = 0
+      Alph = alpha0[, spp.]
+      Lamb = lambda[, spp.]
+
# NOTE(review): as in gompertz(), the simulated scores below use the full
# matrices 'alpha0'/'lambda' with the per-response vector 'ysim' rather
# than the per-column 'Alph'/'Lamb' extracted above -- TODO confirm.
+      for(ii in 1:( .nsimEIM )) {
+        ysim = rmoe(n = n, alpha = Alph, lambda = Lamb)
# (Empty placeholder block retained from upstream; it has no effect.)
+if (ii < 3) {
+}
+
+        temp2 = (expm1(lambda * ysim) + alpha0)
+        dl.dalpha0 = -2 / temp2
+        dl.dlambda = 1 / lambda + ysim -
+                     2 * ysim * exp(lambda * ysim) / temp2
+
+
+        temp3 = cbind(dl.dalpha0, dl.dlambda)
+        run.varcov = run.varcov +
+                     temp3[, ind1$row.index] *
+                     temp3[, ind1$col.index]
+      }
+      run.varcov = cbind(run.varcov / .nsimEIM)
+
# For intercept-only fits, average the simulated EIM over observations.
+      wz1 = if (intercept.only)
+          matrix(colMeans(run.varcov),
+                 nrow = n, ncol = ncol(run.varcov), byrow = TRUE) else
+          run.varcov
+
# Chain rule: convert from theta- to eta-scale.
+      wz1 = wz1 * dThetas.detas[, Musual * (spp. - 1) + ind1$row] *
+                  dThetas.detas[, Musual * (spp. - 1) + ind1$col]
+
+
# Scatter this response's 2x2 block into the big working-weight matrix.
+      for(jay in 1:Musual)
+        for(kay in jay:Musual) {
+          cptr = iam((spp. - 1) * Musual + jay,
+                     (spp. - 1) * Musual + kay,
+                     M = M)
+          wz[, cptr] = wz1[, iam(jay, kay, M = Musual)]
+        }
+    } # End of for(spp.) loop
+
+
+
+
+    w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+  }), list( .llambda = llambda,
+            .elambda = elambda,
+            .nsimEIM = nsimEIM ))))
+} # exponential.mo()
+
+
+
+
+
diff --git a/R/family.aunivariate.R b/R/family.aunivariate.R
index 7d0e828..677d5b0 100644
--- a/R/family.aunivariate.R
+++ b/R/family.aunivariate.R
@@ -13,10 +13,12 @@
dkumar <- function(x, shape1, shape2, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
+
N <- max(length(x), length(shape1), length(shape2))
x <- rep(x, len = N); shape1 <- rep(shape1, len = N);
shape2 <- rep(shape2, len = N)
@@ -51,7 +53,7 @@ qkumar <- function(p, shape1, shape2) {
}
-pkumar = function(q, shape1, shape2) {
+pkumar <- function(q, shape1, shape2) {
ans <- 1.0 - (1.0 - q^shape1)^shape2
ans[q <= 0] <- 0
@@ -65,48 +67,96 @@ pkumar = function(q, shape1, shape2) {
kumar <- function(lshape1 = "loge", lshape2 = "loge",
- eshape1 = list(), eshape2 = list(),
ishape1 = NULL, ishape2 = NULL,
grid.shape1 = c(0.4, 6.0),
tol12 = 1.0e-4, zero = NULL)
{
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 <- as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 <- as.character(substitute(lshape2))
+
+
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+
+
if (length(ishape1) &&
(!is.Numeric(ishape1, allowable.length = 1, positive = TRUE)))
- stop("bad input for argument 'ishape1'")
+ stop("bad input for argument 'ishape1'")
if (length(ishape2) && !is.Numeric(ishape2))
stop("bad input for argument 'ishape2'")
- if (!is.list(eshape1)) eshape1 = list()
- if (!is.list(eshape2)) eshape2 = list()
-
if (!is.Numeric(tol12, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'tol12'")
if (!is.Numeric(grid.shape1, allowable.length = 2, positive = TRUE))
stop("bad input for argument 'grid.shape1'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+
new("vglmff",
blurb = c("Kumaraswamy distribution\n\n",
"Links: ",
- namesof("shape1", lshape1, earg = eshape1, tag = FALSE), ", ",
- namesof("shape2", lshape2, earg = eshape2, tag = FALSE), "\n",
+ namesof("shape1", lshape1, eshape1, tag = FALSE), ", ",
+ namesof("shape2", lshape2, eshape2, tag = FALSE), "\n",
"Mean: ",
"shape2 * beta(1+1/shape1, shape2)"),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ lshape1 = .lshape1 ,
+ zero = .zero )
+ }, list( .zero = zero,
+ .lshape1 = lshape1
+ ))),
+
+
initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- if (any((y <= 0) | (y >=1)))
- stop("the response must be in (0,1)")
- predictors.names <- c(
- namesof("shape1", .lshape1 , earg = .eshape1 , tag = FALSE),
- namesof("shape2", .lshape2 , earg = .eshape2 , tag = FALSE))
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ if (any((y <= 0) | (y >= 1)))
+ stop("the response must be in (0, 1)")
+
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("shape1", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("shape2", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lshape1 , earg = .eshape1 , tag = FALSE),
+ namesof(mynames2, .lshape2 , earg = .eshape2 , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
if (!length(etastart)) {
@@ -116,95 +166,126 @@ pkumar = function(q, shape1, shape2) {
- medy <- weighted.mean(y, w)
-
+ mediany <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
- shape2 <- log(0.5) / log1p(-(medy^shape1))
- sum(w * (log(shape1) + log(shape2) + (shape1-1)*log(y) +
- (shape2-1)*log1p(-y^shape1)))
+ shape2 <- log(0.5) / log1p(-(mediany^shape1))
+ sum(c(w) * dkumar(x = y, shape1 = shape1, shape2 = shape2,
+ log = TRUE))
}
+
shape1.grid <- seq( .grid.shape1[1], .grid.shape1[2], len = 19)
shape1.init <- if (length( .ishape1 )) .ishape1 else
- getMaxMin(shape1.grid, objfun = kumar.Loglikfun, y = y, x = x, w = w)
- shape1.init <- rep(shape1.init, length = length(y))
+ getMaxMin(shape1.grid, objfun = kumar.Loglikfun,
+ y = y, x = x, w = w)
+ shape1.init <- matrix(shape1.init, n, ncoly, byrow = TRUE)
+
- medy <- weighted.mean(y, w)
+ mediany <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
shape2.init <- if (length( .ishape2 )) .ishape2 else
- log(0.5) / log1p(-(medy^shape1.init))
- shape2.init <- rep(shape2.init, length = length(y))
+ log(0.5) / log1p(-(mediany^shape1.init))
+ shape2.init <- matrix(shape2.init, n, ncoly, byrow = TRUE)
+
etastart <- cbind(
theta2eta(shape1.init, .lshape1 , earg = .eshape1 ),
- theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))
+ theta2eta(shape2.init, .lshape2 , earg = .eshape2 ))[,
+ interleave.VGAM(M, M = Musual)]
}
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.ishape1 = ishape1, .ishape2 = ishape2,
.eshape1 = eshape1, .eshape2 = eshape2,
.grid.shape1 = grid.shape1 ))),
linkinv = eval(substitute(function(eta, extra = NULL){
- shape1 <- eta2theta(eta[,1], link = .lshape1 , earg = .eshape1 )
- shape2 <- eta2theta(eta[,2], link = .lshape2 , earg = .eshape2 )
+ shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
shape2 * (base::beta(1 + 1/shape1, shape2))
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
last = eval(substitute(expression({
- misc$link <- c("shape1" = .lshape1, "shape2" = .lshape2)
- misc$earg <- list("shape1" = .eshape1, "shape2" = .eshape2)
- misc$expected = TRUE
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lshape1 , length = ncoly),
+ rep( .lshape2 , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .eshape1
+ misc$earg[[Musual*ii ]] <- .eshape2
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape1 <- eta2theta(eta[,1], link = .lshape1, earg = .eshape1)
- shape2 <- eta2theta(eta[,2], link = .lshape2, earg = .eshape2)
+ shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * dkumar(x=y, shape1 = shape1, shape2 = shape2, log = TRUE))
+ sum(c(w) * dkumar(x = y, shape1 = shape1,
+ shape2 = shape2, log = TRUE))
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("kumar"),
deriv = eval(substitute(expression({
- shape1 <- eta2theta(eta[,1], link = .lshape1, earg = .eshape1)
- shape2 <- eta2theta(eta[,2], link = .lshape2, earg = .eshape2)
- dshape1.deta <- dtheta.deta(shape1, link = .lshape1, earg = .eshape1)
- dshape2.deta <- dtheta.deta(shape2, link = .lshape2, earg = .eshape2)
+ shape1 <- eta2theta(eta[, c(TRUE, FALSE)], .lshape1 , earg = .eshape1 )
+ shape2 <- eta2theta(eta[, c(FALSE, TRUE)], .lshape2 , earg = .eshape2 )
+
+ dshape1.deta <- dtheta.deta(shape1, link = .lshape1 , earg = .eshape1 )
+ dshape2.deta <- dtheta.deta(shape2, link = .lshape2 , earg = .eshape2 )
dl.dshape1 <- 1 / shape1 + log(y) - (shape2 - 1) * log(y) *
(y^shape1) / (1 - y^shape1)
dl.dshape2 <- 1 / shape2 + log1p(-y^shape1)
- c(w) * cbind(dl.dshape1 * dshape1.deta,
- dl.dshape2 * dshape2.deta)
+ myderiv <- c(w) * cbind(dl.dshape1 * dshape1.deta,
+ dl.dshape2 * dshape2.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
weight = eval(substitute(expression({
- ed2l.dshape11 <- (1 + (shape2 / (shape2 - 2)) *
+ ned2l.dshape11 <- (1 + (shape2 / (shape2 - 2)) *
((digamma(shape2) - digamma(2))^2 -
(trigamma(shape2) - trigamma(2)))) / shape1^2
- ed2l.dshape22 <- 1.0 / shape2^2
- ed2l.dshape12 <-
+ ned2l.dshape22 <- 1.0 / shape2^2
+ ned2l.dshape12 <-
-((digamma(1 + shape2) - digamma(2)) / (shape2 - 1.0)) / shape1
index1 <- (abs(shape2 - 1.0) < .tol12)
if (any(index1))
- ed2l.dshape12[index1] <- -trigamma(2) / shape1[index1]
+ ned2l.dshape12[index1] <- -trigamma(2) / shape1[index1]
- index2 <- (abs(shape2 - 2.0) < .tol12)
+ index2 <- (abs(shape2 - 2.0) < .tol12 )
if (any(index2))
- ed2l.dshape11[index2] <-
+ ned2l.dshape11[index2] <-
(1.0 - 2.0 * psigamma(2.0, deriv = 2)) / shape1[index2]^2
- wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M = M)] <- ed2l.dshape11 * dshape1.deta^2
- wz[, iam(2, 2, M = M)] <- ed2l.dshape22 * dshape2.deta^2
- wz[, iam(1, 2, M = M)] <- ed2l.dshape12 * dshape1.deta * dshape2.deta
- c(w) * wz
+
+ wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
+
+ ind11 <- ind22 <- ind12 <- NULL
+ for (ii in 1:(M / Musual)) {
+ ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
+ ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
+ ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
+ }
+
+ wz[, ind11] <- ned2l.dshape11 * dshape1.deta^2
+ wz[, ind22] <- ned2l.dshape22 * dshape2.deta^2
+ wz[, ind12] <- ned2l.dshape12 * dshape1.deta * dshape2.deta
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2,
.tol12 = tol12 ))))
@@ -213,12 +294,15 @@ pkumar = function(q, shape1, shape2) {
+
drice <- function(x, vee, sigma, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
+
N <- max(length(x), length(vee), length(sigma))
x <- rep(x, len = N); vee <- rep(vee, len = N);
sigma <- rep(sigma, len = N)
@@ -228,7 +312,8 @@ drice <- function(x, vee, sigma, log = FALSE) {
x.abs <- abs(x[xok] * vee[xok] / sigma[xok]^2)
logdensity[xok] <- log(x[xok]) - 2 * log(sigma[xok]) +
(-(x[xok]^2+vee[xok]^2)/(2*sigma[xok]^2)) +
- log(besselI(x.abs, nu=0, expon.scaled = TRUE)) + x.abs
+ log(besselI(x.abs, nu=0, expon.scaled = TRUE)) +
+ x.abs
logdensity[sigma <= 0] <- NaN
logdensity[vee < 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
@@ -251,26 +336,33 @@ riceff.control <- function(save.weight = TRUE, ...) {
}
- riceff = function(lvee = "loge", lsigma = "loge",
- evee = list(), esigma = list(),
- ivee = NULL, isigma = NULL,
- nsimEIM = 100, zero = NULL)
+ riceff <- function(lvee = "loge", lsigma = "loge",
+ ivee = NULL, isigma = NULL,
+ nsimEIM = 100, zero = NULL)
{
- if (mode(lvee) != "character" && mode(lvee) != "name")
- lvee = as.character(substitute(lvee))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
+
+ lvee <- as.list(substitute(lvee))
+ evee <- link2list(lvee)
+ lvee <- attr(evee, "function.name")
+
+
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
+
+
+
if (length(ivee) && !is.Numeric(ivee, positive = TRUE))
stop("bad input for argument 'ivee'")
if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
- if (!is.list(evee)) evee = list()
- if (!is.list(esigma)) esigma = list()
+
if (!is.Numeric(nsimEIM, allowable.length = 1,
integer.valued = TRUE) ||
nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
+
new("vglmff",
blurb = c("Rice distribution\n\n",
"Links: ",
@@ -278,22 +370,35 @@ riceff.control <- function(save.weight = TRUE, ...) {
namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
"Mean: ",
"sigma*sqrt(pi/2)*exp(z/2)*((1-z)*",
- "besselI(-z/2,nu=0)-z*besselI(-z/2,nu=1)) where z=-vee^2/(2*sigma^2)"),
+ "besselI(-z/2, nu = 0) - z * besselI(-z/2, nu = 1)) ",
+ "where z=-vee^2/(2*sigma^2)"),
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- if (any((y <= 0)))
- stop("the response must be in (0,Inf)")
- predictors.names = c(
- namesof("vee", .lvee, earg = .evee, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ c(namesof("vee", .lvee, earg = .evee, tag = FALSE),
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+
+
+
if (!length(etastart)) {
- riceff.Loglikfun = function(vee, y, x, w, extraargs) {
+ riceff.Loglikfun <- function(vee, y, x, w, extraargs) {
sigma.init = sd(rep(y, w))
- sum(w * (log(y) - 2*log(sigma.init) +
+ sum(c(w) * (log(y) - 2*log(sigma.init) +
log(besselI(y*vee/sigma.init^2, nu=0)) -
(y^2 + vee^2)/(2*sigma.init^2)))
}
@@ -325,32 +430,38 @@ riceff.control <- function(save.weight = TRUE, ...) {
.evee = evee, .esigma = esigma ))),
last = eval(substitute(expression({
misc$link <- c("vee" = .lvee, "sigma" = .lsigma)
+
misc$earg <- list("vee" = .evee, "sigma" = .esigma)
+
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
- function(mu,y, w,residuals = FALSE,eta,extra = NULL) {
- vee = eta2theta(eta[,1], link = .lvee, earg = .evee)
- sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * drice(x=y, vee = vee, sigma = sigma, log = TRUE))
- }
+ function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
+ vee = eta2theta(eta[, 1], link = .lvee, earg = .evee)
+ sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * drice(x = y, vee = vee, sigma = sigma, log = TRUE))
+ }
}, list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma ))),
vfamily = c("riceff"),
deriv = eval(substitute(expression({
- vee = eta2theta(eta[,1], link = .lvee, earg = .evee)
- sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
+ vee = eta2theta(eta[, 1], link = .lvee, earg = .evee)
+ sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+
dvee.deta = dtheta.deta(vee, link = .lvee, earg = .evee)
dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
temp8 = y * vee / sigma^2
dl.dvee = -vee/sigma^2 + (y/sigma^2) *
besselI(temp8, nu=1) / besselI(temp8, nu=0)
dl.dsigma = -2/sigma + (y^2 + vee^2)/(sigma^3) - (2 * temp8 / sigma) *
besselI(temp8, nu=1) / besselI(temp8, nu=0)
+
c(w) * cbind(dl.dvee * dvee.deta,
dl.dsigma * dsigma.deta)
}), list( .lvee = lvee, .lsigma = lsigma,
@@ -369,7 +480,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
rm(ysim)
temp3 = cbind(dl.dvee, dl.dsigma)
run.var = ((ii-1) * run.var + temp3^2) / ii
- run.cov = ((ii-1) * run.cov + temp3[,1] * temp3[,2]) / ii
+ run.cov = ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var, run.cov)),
@@ -377,7 +488,7 @@ riceff.control <- function(save.weight = TRUE, ...) {
dtheta.detas = cbind(dvee.deta, dsigma.deta)
index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ wz = wz * dtheta.detas[, index0$row] * dtheta.detas[, index0$col]
c(w) * wz
}), list( .lvee = lvee, .lsigma = lsigma,
.evee = evee, .esigma = esigma, .nsimEIM = nsimEIM ))))
@@ -386,19 +497,20 @@ riceff.control <- function(save.weight = TRUE, ...) {
-dskellam = function(x, mu1, mu2, log = FALSE) {
- log.arg = log; rm(log)
- if ( !is.logical( log.arg ) || length( log.arg )!=1 )
- stop("bad input for 'log.arg'")
+dskellam <- function(x, mu1, mu2, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- L = max(length(x), length(mu1), length(mu2))
- x = rep(x, len = L);
- mu1 = rep(mu1, len = L);
- mu2 = rep(mu2, len = L);
- ok2 <- is.finite(mu1) && is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
- ok3 <- (mu1 == 0) & (mu2 > 0)
- ok4 <- (mu1 > 0) & (mu2 == 0)
- ok5 <- (mu1 == 0) & (mu2 == 0)
+
+ L = max(length(x), length(mu1), length(mu2))
+ x = rep(x, len = L);
+ mu1 = rep(mu1, len = L);
+ mu2 = rep(mu2, len = L);
+ ok2 <- is.finite(mu1) && is.finite(mu2) & (mu1 >= 0) & (mu2 >= 0)
+ ok3 <- (mu1 == 0) & (mu2 > 0)
+ ok4 <- (mu1 > 0) & (mu2 == 0)
+ ok5 <- (mu1 == 0) & (mu2 == 0)
if (log.arg) {
ans = -mu1 - mu2 + 2 * sqrt(mu1*mu2) +
0.5 * x * log(mu1) - 0.5 * x * log(mu2) +
@@ -424,7 +536,7 @@ dskellam = function(x, mu1, mu2, log = FALSE) {
-rskellam = function(n, mu1, mu2) {
+rskellam <- function(n, mu1, mu2) {
rpois(n, mu1) - rpois(n, mu2)
}
@@ -435,53 +547,69 @@ skellam.control <- function(save.weight = TRUE, ...) {
}
- skellam = function(lmu1 = "loge", lmu2 = "loge",
- emu1 = list(), emu2= list(),
- imu1 = NULL, imu2 = NULL,
- nsimEIM = 100, parallel = FALSE, zero = NULL)
+ skellam <- function(lmu1 = "loge", lmu2 = "loge",
+ imu1 = NULL, imu2 = NULL,
+ nsimEIM = 100, parallel = FALSE, zero = NULL)
{
- if (mode(lmu1) != "character" && mode(lmu1) != "name")
- lmu1 = as.character(substitute(lmu1))
- if (mode(lmu2) != "character" && mode(lmu2) != "name")
- lmu2 = as.character(substitute(lmu2))
- if (length(imu1) &&
- !is.Numeric(imu1, positive = TRUE))
- stop("bad input for argument 'imu1'")
- if (length(imu2) &&
- !is.Numeric(imu2, positive = TRUE))
- stop("bad input for argument 'imu2'")
-
- if (!is.list(emu1)) emu1 = list()
- if (!is.list(emu2)) emu2 = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
-
- new("vglmff",
- blurb = c("Skellam distribution\n\n",
- "Links: ",
- namesof("mu1", lmu1, earg = emu1, tag = FALSE), ", ",
- namesof("mu2", lmu2, earg = emu2, tag = FALSE), "\n",
- "Mean: mu1-mu2", "\n",
- "Variance: mu1+mu2"),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel=parallel, .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- if (any((y != round(y))))
- stop("the response should be integer-valued")
- predictors.names = c(
- namesof("mu1", .lmu1, earg = .emu1, tag = FALSE),
- namesof("mu2", .lmu2, earg = .emu2, tag = FALSE))
- if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- var.y.est = sum(w * junk$resid^2) / junk$df.residual
- mean.init = weighted.mean(y, w)
+
+ lmu1 <- as.list(substitute(lmu1))
+ emu1 <- link2list(lmu1)
+ lmu1 <- attr(emu1, "function.name")
+
+ lmu2 <- as.list(substitute(lmu2))
+ emu2 <- link2list(lmu2)
+ lmu2 <- attr(emu2, "function.name")
+
+
+ if (length(imu1) &&
+ !is.Numeric(imu1, positive = TRUE))
+ stop("bad input for argument 'imu1'")
+ if (length(imu2) &&
+ !is.Numeric(imu2, positive = TRUE))
+ stop("bad input for argument 'imu2'")
+
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+
+ new("vglmff",
+ blurb = c("Skellam distribution\n\n",
+ "Links: ",
+ namesof("mu1", lmu1, earg = emu1, tag = FALSE), ", ",
+ namesof("mu2", lmu2, earg = emu2, tag = FALSE), "\n",
+ "Mean: mu1-mu2", "\n",
+ "Variance: mu1+mu2"),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <- c(
+ namesof("mu1", .lmu1, earg = .emu1, tag = FALSE),
+ namesof("mu2", .lmu2, earg = .emu2, tag = FALSE))
+
+
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y = c(y), w = c(w))
+ var.y.est = sum(c(w) * junk$resid^2) / junk$df.residual
+ mean.init = weighted.mean(y, w)
mu1.init = max((var.y.est + mean.init)/2, 0.01)
mu2.init = max((var.y.est - mean.init)/2, 0.01)
mu1.init = rep(if(length( .imu1 )) .imu1 else mu1.init,
@@ -490,27 +618,27 @@ skellam.control <- function(save.weight = TRUE, ...) {
length = n)
etastart = cbind(theta2eta(mu1.init, .lmu1, earg = .emu1),
theta2eta(mu2.init, .lmu2, earg = .emu2))
- }
- }), list( .lmu1 = lmu1, .lmu2 = lmu2,
- .imu1=imu1, .imu2=imu2,
- .emu1 = emu1, .emu2 = emu2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
- mu1 - mu2
- }, list( .lmu1 = lmu1, .lmu2 = lmu2,
- .emu1 = emu1, .emu2 = emu2 ))),
- last = eval(substitute(expression({
- misc$link <- c("mu1" = .lmu1, "mu2" = .lmu2)
- misc$earg <- list("mu1" = .emu1, "mu2" = .emu2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lmu1 = lmu1, .lmu2 = lmu2,
- .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu,y, w,residuals = FALSE,eta,extra = NULL) {
- mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
+ }
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .imu1 = imu1, .imu2 = imu2,
+ .emu1 = emu1, .emu2 = emu2 ))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
+ mu1 - mu2
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c("mu1" = .lmu1, "mu2" = .lmu2)
+ misc$earg <- list("mu1" = .emu1, "mu2" = .emu2)
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2,
+ .emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
+ mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
@@ -519,20 +647,20 @@ skellam.control <- function(save.weight = TRUE, ...) {
if ( is.logical( .parallel ) && length( .parallel )== 1 &&
.parallel )
- sum(w * log(besselI(2*mu1, nu=y, expon = TRUE))) else
- sum(w * (-mu1 - mu2 +
+ sum(c(w) * log(besselI(2*mu1, nu=y, expon = TRUE))) else
+ sum(c(w) * (-mu1 - mu2 +
0.5 * y * log(mu1) -
0.5 * y * log(mu2) +
2 * sqrt(mu1*mu2) + # Use this when expon = TRUE
log(besselI(2 * sqrt(mu1*mu2), nu=y, expon = TRUE))))
}
}, list( .lmu1 = lmu1, .lmu2 = lmu2,
- .parallel=parallel,
+ .parallel = parallel,
.emu1 = emu1, .emu2 = emu2 ))),
vfamily = c("skellam"),
deriv = eval(substitute(expression({
- mu1 = eta2theta(eta[,1], link = .lmu1, earg = .emu1)
- mu2 = eta2theta(eta[,2], link = .lmu2, earg = .emu2)
+ mu1 = eta2theta(eta[, 1], link = .lmu1, earg = .emu1)
+ mu2 = eta2theta(eta[, 2], link = .lmu2, earg = .emu2)
dmu1.deta = dtheta.deta(mu1, link = .lmu1, earg = .emu1)
dmu2.deta = dtheta.deta(mu2, link = .lmu2, earg = .emu2)
temp8 = 2 * sqrt(mu1*mu2)
@@ -559,7 +687,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
rm(ysim)
temp3 = cbind(dl.dmu1, dl.dmu2)
run.var = ((ii-1) * run.var + temp3^2) / ii
- run.cov = ((ii-1) * run.cov + temp3[,1] * temp3[,2]) / ii
+ run.cov = ((ii-1) * run.cov + temp3[, 1] * temp3[, 2]) / ii
}
wz = if (intercept.only)
matrix(colMeans(cbind(run.var, run.cov)),
@@ -567,7 +695,7 @@ skellam.control <- function(save.weight = TRUE, ...) {
dtheta.detas = cbind(dmu1.deta, dmu2.deta)
index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ wz = wz * dtheta.detas[, index0$row] * dtheta.detas[, index0$col]
c(w) * wz
}), list( .lmu1 = lmu1, .lmu2 = lmu2,
.emu1 = emu1, .emu2 = emu2, .nsimEIM = nsimEIM ))))
@@ -576,35 +704,36 @@ skellam.control <- function(save.weight = TRUE, ...) {
-dyules = function(x, rho, log = FALSE) {
- log.arg = log
+dyules <- function(x, rho, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
- if ( !is.logical( log.arg ) || length( log.arg )!=1 )
- stop("bad input for 'log.arg'")
+
+
if ( log.arg ) {
- ans = log(rho) + lbeta(abs(x), rho+1)
- ans[(x != round(x)) | (x < 1)] = log(0)
+ ans <- log(rho) + lbeta(abs(x), rho+1)
+ ans[(x != round(x)) | (x < 1)] <- log(0)
} else {
ans = rho * beta(x, rho+1)
- ans[(x != round(x)) | (x < 1)] = 0
+ ans[(x != round(x)) | (x < 1)] <- 0
}
- ans[!is.finite(rho) | (rho <= 0) | (rho <= 0)] = NA
+ ans[!is.finite(rho) | (rho <= 0) | (rho <= 0)] <- NA
ans
}
-ryules = function(n, rho) {
+ryules <- function(n, rho) {
if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
rgeom(n, prob = exp(-rexp(n, rate=rho))) + 1
}
-pyules = function(q, rho) {
- tq = trunc(q)
- ans = 1 - tq * beta(abs(tq), rho+1)
- ans[q<1] = 0
- ans[(rho <= 0) | (rho <= 0)] = NA
+pyules <- function(q, rho) {
+ tq <- trunc(q)
+ ans <- 1 - tq * beta(abs(tq), rho+1)
+ ans[q < 1] <- 0
+ ans[(rho <= 0) | (rho <= 0)] <- NA
ans
}
@@ -616,101 +745,542 @@ yulesimon.control <- function(save.weight = TRUE, ...) {
}
- yulesimon = function(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
+ yulesimon <- function(link = "loge",
+ irho = NULL, nsimEIM = 200,
+ zero = NULL)
{
- if (length(irho) &&
- !is.Numeric(irho, positive = TRUE))
- stop("argument 'irho' must be > 0")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
-
- new("vglmff",
- blurb = c("Yule-Simon distribution f(y) = rho*beta(y,rho+1), ",
- "rho>0, y=1,2,..\n\n",
+
+ if (length(irho) &&
+ !is.Numeric(irho, positive = TRUE))
+ stop("argument 'irho' must be > 0")
+
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Yule-Simon distribution f(y) = rho * beta(y, rho + 1), ",
+ "rho > 0, y = 1, 2,..\n\n",
"Link: ",
- namesof("p", link, earg =earg), "\n\n",
- "Mean: rho/(rho-1), provided rho>1\n",
- "Variance: rho^2 / ((rho-1)^2 * (rho-2)), provided rho>2"),
- initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (any(y < 1))
- stop("all y values must be in 1,2,3,...")
- if (any(y != round(y )))
- warning("y should be integer-valued")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("rho", .link, earg =.earg, tag = FALSE)
-
- if (!length(etastart)) {
- wmeany = weighted.mean(y, w) + 1/8
- rho.init = wmeany / (wmeany - 1)
- rho.init = rep( if (length( .irho )) .irho else
- rho.init, len = n)
- etastart = theta2eta(rho.init, .link, earg =.earg)
- }
- }), list( .link=link, .earg =earg, .irho=irho ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- ans = rho = eta2theta(eta, .link, earg =.earg)
- ans[rho>1] = rho / (rho - 1)
- ans[rho<=1] = NA
- ans
- }, list( .link=link, .earg =earg ))),
- last = eval(substitute(expression({
- misc$link <- c(rho = .link)
- misc$earg <- list(rho = .earg)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .link=link, .earg =earg, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu,y, w,residuals= FALSE,eta, extra = NULL) {
- rho = eta2theta(eta, .link, earg =.earg)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * dyules(x=y, rho=rho, log = TRUE))
- }
- }, list( .link=link, .earg =earg ))),
- vfamily = c("yulesimon"),
- deriv = eval(substitute(expression({
- rho = eta2theta(eta, .link, earg =.earg)
- dl.drho = 1/rho + digamma(1+rho) - digamma(1+rho+y)
- drho.deta = dtheta.deta(rho, .link, earg =.earg)
- w * dl.drho * drho.deta
- }), list( .link=link, .earg =earg ))),
- weight = eval(substitute(expression({
- run.var = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = ryules(n, rho=rho)
- dl.drho = 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
- rm(ysim)
- temp3 = dl.drho
- run.var = ((ii-1) * run.var + temp3^2) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow = TRUE) else cbind(run.var)
+ namesof("rho", link, earg = earg), "\n\n",
+ "Mean: rho / (rho - 1), provided rho > 1\n",
+ "Variance: rho^2 / ((rho - 1)^2 * (rho - 2)), ",
+ "provided rho > 2"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ nsimEIM = .nsimEIM,
+ zero = .zero )
+ }, list( .zero = zero,
+ .nsimEIM = nsimEIM ))),
- wz = wz * drho.deta^2
+ initialize = eval(substitute(expression({
- c(w) * wz
- }), list( .nsimEIM = nsimEIM ))))
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ ncoly <- ncol(y)
+
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("rho", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ wmeany = colSums(y * w) / colSums(w) + 1/8
+
+ rho.init = wmeany / (wmeany - 1)
+ rho.init = matrix(if (length( .irho )) .irho else
+ rho.init, n, M, byrow = TRUE)
+ etastart = theta2eta(rho.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .irho = irho ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ ans = rho = eta2theta(eta, .link , earg = .earg )
+ ans[rho > 1] <- rho / (rho - 1)
+ ans[rho <= 1] <- NA
+ ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$irho <- .irho
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$nsimEIM <- .nsimEIM
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
+ .irho = irho ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ rho = eta2theta(eta, .link , earg = .earg )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * dyules(x = y, rho = rho, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("yulesimon"),
+ deriv = eval(substitute(expression({
+ Musual <- 1
+ rho <- eta2theta(eta, .link , earg = .earg )
+ dl.drho <- 1/rho + digamma(1+rho) - digamma(1+rho+y)
+ drho.deta <- dtheta.deta(rho, .link , earg = .earg )
+ c(w) * dl.drho * drho.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- ryules(n, rho = rho)
+ dl.drho = 1/rho + digamma(1+rho) - digamma(1+rho+ysim)
+ rm(ysim)
+ temp3 <- dl.drho
+ run.var <- ((ii-1) * run.var + temp3^2) / ii
+ }
+ wz <- if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, M, byrow = TRUE) else cbind(run.var)
+
+ wz <- wz * drho.deta^2
+
+
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
+}
+
+
+
+
+
+
+
+dlind <- function(x, theta, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ if ( log.arg ) {
+ ans <- 2 * log(theta) + log1p(x) - theta * x - log1p(theta)
+ ans[(x < 0)] <- log(0)
+ } else {
+ ans <- theta^2 * (1 + x) * exp(-theta * x) / (1 + theta)
+ ans[(x < 0)] <- 0
+ }
+ ans[(theta <= 0)] <- NaN
+ ans
+}
+
+
+
+plind <- function(q, theta) {
+
+ ifelse(q > 0,
+ 1 - (theta + 1 + theta * q) * exp(-theta * q) / (1 + theta),
+ 0)
+}
+
+
+
+
+
+
+rlind <- function(n, theta) {
+
+
+
+
+ ifelse(runif(n) < theta / (1 + theta),
+ rexp(n, theta),
+ rgamma(n, shape = 2, scale = 1 / theta))
+}
+
+
+
+ lindley <- function(link = "loge",
+ itheta = NULL, zero = NULL) {
+
+
+ if (length(itheta) &&
+ !is.Numeric(itheta, positive = TRUE))
+ stop("argument 'itheta' must be > 0")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Lindley distribution f(y) = ",
+ "theta^2 * (1 + y) * exp(-theta * y) / (1 + theta), ",
+ "theta > 0, y > 0,\n\n",
+ "Link: ",
+ namesof("theta", link, earg = earg), "\n\n",
+ "Mean: (theta + 2) / (theta * (theta + 1))\n",
+ "Variance: (theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2"),
+
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ ncoly <- ncol(y)
+
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ wmeany <- colSums(y * w) / colSums(w) + 1/8
+
+
+ theta.init <- 1 / (wmeany + 1)
+ theta.init <- matrix(if (length( .itheta )) .itheta else
+ theta.init, n, M, byrow = TRUE)
+ etastart <- theta2eta(theta.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .itheta = itheta ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ theta <- eta2theta(eta, .link , earg = .earg )
+ (theta + 2) / (theta * (theta + 1))
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$itheta <- .itheta
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .link = link, .earg = earg,
+ .itheta = itheta ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ theta <- eta2theta(eta, .link , earg = .earg )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * dlind(x = y, theta = theta, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("lindley"),
+ deriv = eval(substitute(expression({
+ Musual <- 1
+ theta <- eta2theta(eta, .link , earg = .earg )
+
+ dl.dtheta <- 2 / theta - 1 / (1 + theta) - y
+
+ dtheta.deta <- dtheta.deta(theta, .link , earg = .earg )
+
+ c(w) * dl.dtheta * dtheta.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+
+ ned2l.dtheta2 <- (theta^2 + 4 * theta + 2) / (theta * (1 + theta))^2
+
+ c(w) * ned2l.dtheta2 * dtheta.deta^2
+ }), list( .zero = zero ))))
+}
+
+
+
+
+
+
+dpoislindley <- function(x, theta, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if ( log.arg ) {
+ ans <- 2 * log(theta) + log(theta + 2 + x) -
+ (x+3) * log1p(theta)
+ ans[(x != round(x)) | (x < 0)] <- log(0)
+ } else {
+ ans <- theta^2 * (theta + 2 + x) / (theta + 1)^(x+3)
+ ans[(x != round(x)) | (x < 0)] <- 0
+ }
+ ans[ # !is.finite(theta) |
+ (theta <= 0)] <- NA
+ ans
+}
+
+
+if (FALSE)
+rpoislindley <- function(n, theta) {
+}
+
+
+if (FALSE)
+ppoislindley <- function(q, theta) {
+}
+
+
+
+if (FALSE)
+poislindley.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
+}
+
+
+if (FALSE)
+ poissonlindley <-
+ function(link = "loge",
+ itheta = NULL, nsimEIM = 200,
+ zero = NULL)
+{
+
+ stop("not working since rpoislindley() not written")
+
+
+
+ if (length(itheta) &&
+ !is.Numeric(itheta, positive = TRUE))
+ stop("argument 'itheta' must be > 0")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Poisson-Lindley distribution f(y) = ",
+ "theta^2 * (theta + 2 + y) / (theta + 1)^(y+3), ",
+ "theta > 0, y = 0, 1, 2,..\n\n",
+ "Link: ",
+ namesof("theta", link, earg = earg), "\n\n",
+ "Mean: (theta + 2) / (theta * (theta + 1)),\n",
+ "Variance: (theta^3 + 4 * theta^2 + 6 * theta + 2) / ",
+ "(theta * (theta + 1))^2, "
+ ),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ nsimEIM = .nsimEIM,
+ zero = .zero )
+ }, list( .zero = zero,
+ .nsimEIM = nsimEIM ))),
+
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ ncoly <- ncol(y)
+
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("theta", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ wmeany <- colSums(y * w) / colSums(w) + 1/8
+
+ MOM <- (sqrt((wmeany - 1)^2 + 8 * wmeany) -
+ wmeany + 1) / (2 * wmeany)
+ MOM[MOM < 0.01] <- 0.01
+
+
+ theta.init <- MOM
+ theta.init <- matrix(if (length( .itheta )) .itheta else
+ theta.init, n, M, byrow = TRUE)
+ etastart <- theta2eta(theta.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .itheta = itheta ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ theta <- eta2theta(eta, .link , earg = .earg )
+ (theta + 2) / (theta * (theta + 1))
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$itheta <- .itheta
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$nsimEIM <- .nsimEIM
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
+ .itheta = itheta ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ theta = eta2theta(eta, .link , earg = .earg )
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * dpoislindley(x = y, theta = theta, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("poissonlindley"),
+ deriv = eval(substitute(expression({
+ Musual <- 1
+ theta <- eta2theta(eta, .link , earg = .earg )
+
+ dl.dtheta <- 2 / theta + 1 / (y + 2 + theta) - (y + 3) / (theta + 1)
+
+ dtheta.deta <- dtheta.deta(theta, .link , earg = .earg )
+
+ c(w) * dl.dtheta * dtheta.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- rpoislindley(n, theta = theta)
+ dl.dtheta <- 2 / theta + 1 / (ysim + 2 + theta) -
+ (ysim + 3) / (theta + 1)
+ rm(ysim)
+ temp3 <- dl.dtheta
+ run.var <- ((ii-1) * run.var + temp3^2) / ii
+ }
+ wz <- if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, M, byrow = TRUE) else cbind(run.var)
+
+ wz <- wz * dtheta.deta^2
+
+
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
}
+
+
+
dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
smallno =.Machine$double.eps*1000){
- log.arg = log
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
if (!is.Numeric(sigma) || any(sigma <= 0))
- stop("'sigma' must be positive")
+ stop("argument 'sigma' must be positive")
L = max(length(x), length(mu), length(sigma))
x = rep(x, len = L);
mu = rep(mu, len = L);
@@ -727,7 +1297,7 @@ dslash <- function(x, mu = 0, sigma = 1, log = FALSE,
pslash <- function(q, mu = 0, sigma = 1){
if (!is.Numeric(sigma) || any(sigma <= 0))
- stop("'sigma' must be positive")
+ stop("argument 'sigma' must be positive")
L = max(length(q), length(mu), length(sigma))
q = rep(q, len = L);
mu = rep(mu, len = L);
@@ -761,160 +1331,179 @@ slash.control <- function(save.weight = TRUE, ...)
list(save.weight = save.weight)
}
- slash = function(lmu = "identity", lsigma = "loge",
- emu = list(), esigma = list(),
+
+ slash <- function(lmu = "identity", lsigma = "loge",
imu = NULL, isigma = NULL,
iprobs = c(0.1, 0.9),
nsimEIM = 250, zero = NULL,
- smallno = .Machine$double.eps*1000)
+ smallno = .Machine$double.eps * 1000)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
- if (length(isigma) &&
- !is.Numeric(isigma, positive = TRUE))
- stop("'isigma' must be > 0")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
-
- if (!is.Numeric(iprobs, positive = TRUE) ||
- max(iprobs) >= 1 ||
- length(iprobs) != 2)
- stop("bad input for argument 'iprobs'")
- if (!is.Numeric(smallno, positive = TRUE) ||
- smallno > 0.1)
- stop("bad input for argument 'smallno'")
-
-
- new("vglmff",
- blurb = c("Slash distribution\n\n",
- "Links: ",
- namesof("mu", lmu, earg = emu, tag = FALSE), ", ",
- namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
- paste(
- "1-exp(-(((y-mu)/sigma)^2)/2))/(sqrt(2*pi)*",
- "sigma*((y-mu)/sigma)^2)",
- "\ty!=mu",
- "\n1/(2*sigma*sqrt(2*pi))",
- "\t\t\t\t\t\t\ty=mu\n")),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names = c(
- namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
- if (!length(etastart)) {
-
- slash.Loglikfun = function(mu, y, x, w, extraargs) {
- sigma = if (is.Numeric(.isigma)) .isigma else
- max(0.01,
- ((quantile(rep(y, w), prob = 0.75)/2)-mu)/qnorm(0.75))
- zedd = (y-mu)/sigma
- sum(w * ifelse(abs(zedd)<.smallno,
- -log(2*sigma*sqrt(2*pi)),
- log1p(-exp(-zedd^2/2)) -
- log(sqrt(2*pi) * sigma * zedd^2)))
- }
- iprobs = .iprobs
- mu.grid = quantile(rep(y, w), probs=iprobs)
- mu.grid = seq(mu.grid[1], mu.grid[2], length=100)
- mu.init = if (length( .imu )) .imu else
- getMaxMin(mu.grid, objfun = slash.Loglikfun,
- y = y, x = x, w = w)
- sigma.init = if (is.Numeric(.isigma)) .isigma else
- max(0.01,
- ((quantile(rep(y, w), prob = 0.75)/2) -
- mu.init) / qnorm(0.75))
- mu.init = rep(mu.init, length = length(y))
- etastart = matrix(0, n, 2)
- etastart[,1] = theta2eta(mu.init, .lmu, earg =.emu)
- etastart[,2] = theta2eta(sigma.init, .lsigma, earg =.esigma)
- }
- }), list( .lmu = lmu, .lsigma = lsigma,
- .imu = imu, .isigma = isigma,
- .emu = emu, .esigma = esigma,
- .iprobs=iprobs, .smallno = smallno))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- NA * eta2theta(eta[,1], link = .lmu, earg = .emu)
- }, list( .lmu = lmu, .emu = emu ))),
- last = eval(substitute(expression({
- misc$link <- c("mu" = .lmu, "sigma" = .lsigma)
- misc$earg <- list("mu" = .emu, "sigma" = .esigma)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lmu = lmu, .lsigma = lsigma,
- .emu = emu, .esigma = esigma, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu,y, w,residuals = FALSE,eta,extra = NULL) {
- mu = eta2theta(eta[,1], link = .lmu, earg = .emu)
- sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
- zedd = (y - mu) / sigma
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(w * dslash(x=y, mu = mu, sigma = sigma, log = TRUE,
- smallno = .smallno))
- }
- }, list( .lmu = lmu, .lsigma = lsigma,
- .emu = emu, .esigma = esigma, .smallno = smallno ))),
- vfamily = c("slash"),
- deriv = eval(substitute(expression({
- mu = eta2theta(eta[,1], link = .lmu, earg = .emu)
- sigma = eta2theta(eta[,2], link = .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
- zedd = (y-mu)/sigma
- d3 = deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
- log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2),
- c("mu", "sigma"))
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient")
- dl.dmu = dl.dthetas[,1]
- dl.dsigma = dl.dthetas[,2]
- ind0 = (abs(zedd) < .smallno)
- dl.dmu[ind0] = 0
- dl.dsigma[ind0] = -1/sigma[ind0]
- ans = c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta)
- ans
- }), list( .lmu = lmu, .lsigma = lsigma,
- .emu = emu, .esigma = esigma, .smallno = smallno ))),
- weight=eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- sd3 = deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
- log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
- c("mu", "sigma"))
- for(ii in 1:( .nsimEIM )) {
- ysim = rslash(n, mu = mu, sigma = sigma)
- seval.d3 = eval(sd3)
- dl.dthetas = attr(seval.d3, "gradient")
- dl.dmu = dl.dthetas[,1]
- dl.dsigma = dl.dthetas[,2]
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
+
+
+ if (length(isigma) &&
+ !is.Numeric(isigma, positive = TRUE))
+ stop("argument 'isigma' must be > 0")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+
+ if (!is.Numeric(iprobs, positive = TRUE) ||
+ max(iprobs) >= 1 ||
+ length(iprobs) != 2)
+ stop("bad input for argument 'iprobs'")
+ if (!is.Numeric(smallno, positive = TRUE) ||
+ smallno > 0.1)
+ stop("bad input for argument 'smallno'")
+
+
+ new("vglmff",
+ blurb = c("Slash distribution\n\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu, tag = FALSE), ", ",
+ namesof("sigma", lsigma, earg = esigma, tag = FALSE), "\n",
+ paste(
+ "1-exp(-(((y-mu)/sigma)^2)/2))/(sqrt(2*pi)*",
+ "sigma*((y-mu)/sigma)^2)",
+ "\ty!=mu",
+ "\n1/(2*sigma*sqrt(2*pi))",
+ "\t\t\t\t\t\t\ty=mu\n")),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names = c(
+ namesof("mu", .lmu, earg = .emu, tag = FALSE),
+ namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+
+
+ if (!length(etastart)) {
+
+ slash.Loglikfun <- function(mu, y, x, w, extraargs) {
+ sigma = if (is.Numeric(.isigma)) .isigma else
+ max(0.01,
+ ((quantile(rep(y, w), prob = 0.75)/2)-mu)/qnorm(0.75))
+ zedd = (y-mu)/sigma
+ sum(c(w) * ifelse(abs(zedd)<.smallno,
+ -log(2*sigma*sqrt(2*pi)),
+ log1p(-exp(-zedd^2/2)) -
+ log(sqrt(2*pi) * sigma * zedd^2)))
+ }
+ iprobs = .iprobs
+ mu.grid = quantile(rep(y, w), probs=iprobs)
+ mu.grid = seq(mu.grid[1], mu.grid[2], length=100)
+ mu.init = if (length( .imu )) .imu else
+ getMaxMin(mu.grid, objfun = slash.Loglikfun,
+ y = y, x = x, w = w)
+ sigma.init = if (is.Numeric(.isigma)) .isigma else
+ max(0.01,
+ ((quantile(rep(y, w), prob = 0.75)/2) -
+ mu.init) / qnorm(0.75))
+ mu.init = rep(mu.init, length = length(y))
+ etastart = matrix(0, n, 2)
+ etastart[, 1] = theta2eta(mu.init, .lmu, earg = .emu)
+ etastart[, 2] = theta2eta(sigma.init, .lsigma, earg = .esigma)
+ }
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .imu = imu, .isigma = isigma,
+ .emu = emu, .esigma = esigma,
+ .iprobs = iprobs, .smallno = smallno))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ NA * eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ }, list( .lmu = lmu, .emu = emu ))),
+ last = eval(substitute(expression({
+ misc$link <- c("mu" = .lmu, "sigma" = .lsigma)
+
+ misc$earg <- list("mu" = .emu, "sigma" = .esigma)
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals = FALSE,eta,extra = NULL) {
+ mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ zedd = (y - mu) / sigma
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(c(w) * dslash(x = y, mu = mu, sigma = sigma, log = TRUE,
+ smallno = .smallno))
+ }
+ }, list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .smallno = smallno ))),
+ vfamily = c("slash"),
+ deriv = eval(substitute(expression({
+ mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 2], link = .lsigma, earg = .esigma)
+ dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+ zedd = (y-mu)/sigma
+ d3 = deriv3(~ w * log(1-exp(-(((y-mu)/sigma)^2)/2))-
+ log(sqrt(2*pi)*sigma*((y-mu)/sigma)^2),
+ c("mu", "sigma"))
+ eval.d3 = eval(d3)
+ dl.dthetas = attr(eval.d3, "gradient")
+ dl.dmu = dl.dthetas[, 1]
+ dl.dsigma = dl.dthetas[, 2]
+ ind0 = (abs(zedd) < .smallno)
+ dl.dmu[ind0] = 0
+ dl.dsigma[ind0] = -1/sigma[ind0]
+ ans = c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
+ ans
+ }), list( .lmu = lmu, .lsigma = lsigma,
+ .emu = emu, .esigma = esigma, .smallno = smallno ))),
+ weight=eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ sd3 = deriv3(~ w * log(1-exp(-(((ysim-mu)/sigma)^2)/2))-
+ log(sqrt(2*pi)*sigma*((ysim-mu)/sigma)^2),
+ c("mu", "sigma"))
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rslash(n, mu = mu, sigma = sigma)
+ seval.d3 = eval(sd3)
+
+ dl.dthetas = attr(seval.d3, "gradient")
+ dl.dmu = dl.dthetas[, 1]
+ dl.dsigma = dl.dthetas[, 2]
temp3 = cbind(dl.dmu, dl.dsigma)
run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ temp3[, ind1$row.index]*temp3[, ind1$col.index]) / ii
}
wz = if (intercept.only)
matrix(colMeans(run.varcov, na.rm = FALSE),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
dthetas.detas = cbind(dmu.deta, dsigma.deta)
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
c(w) * wz
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma,
@@ -924,88 +1513,103 @@ slash.control <- function(save.weight = TRUE, ...)
-dnefghs = function(x, tau, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dnefghs <- function(x, tau, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
- N = max(length(x), length(tau))
- x = rep(x, len = N); tau = rep(tau, len = N);
+ N = max(length(x), length(tau))
+ x = rep(x, len = N); tau = rep(tau, len = N);
- logdensity = log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
- logdensity[tau < 0] = NaN
- logdensity[tau > 1] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ logdensity = log(sin(pi*tau)) + (1-tau)*x - log(pi) - log1p(exp(x))
+ logdensity[tau < 0] = NaN
+ logdensity[tau > 1] = NaN
+ if (log.arg) logdensity else exp(logdensity)
}
- nefghs <- function(link = "logit", earg = list(), itau = NULL,
- imethod = 1)
+ nefghs <- function(link = "logit",
+ itau = NULL, imethod = 1)
{
- if (length(itau) &&
- !is.Numeric(itau, positive = TRUE) ||
- any(itau >= 1))
- stop("argument 'itau' must be in (0,1)")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
-
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ if (length(itau) &&
+ !is.Numeric(itau, positive = TRUE) ||
+ any(itau >= 1))
+ stop("argument 'itau' must be in (0, 1)")
- new("vglmff",
- blurb = c("Natural exponential family generalized hyperbolic ",
- "secant distribution\n",
- "f(y) = sin(pi*tau)*exp((1-tau)*y)/(pi*(1+exp(y))\n\n",
- "Link: ",
- namesof("tau", link, earg =earg), "\n\n",
- "Mean: pi / tan(pi * tau)\n"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("tau", .link, earg =.earg, tag = FALSE)
-
- if (!length(etastart)) {
- wmeany = if ( .imethod == 1) weighted.mean(y, w) else
- median(rep(y, w))
- if (abs(wmeany) < 0.01) wmeany = 0.01
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Natural exponential family generalized hyperbolic ",
+ "secant distribution\n",
+ "f(y) = sin(pi*tau)*exp((1-tau)*y)/(pi*(1+exp(y))\n\n",
+ "Link: ",
+ namesof("tau", link, earg = earg), "\n\n",
+ "Mean: pi / tan(pi * tau)\n"),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ namesof("tau", .link , earg = .earg , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ wmeany = if ( .imethod == 1) weighted.mean(y, w) else
+ median(rep(y, w))
+ if (abs(wmeany) < 0.01) wmeany = 0.01
tau.init = atan(pi / wmeany) / pi + 0.5
tau.init[tau.init < 0.03] = 0.03
tau.init[tau.init > 0.97] = 0.97
- tau.init = rep( if (length( .itau )) .itau else tau.init, len = n)
- etastart = theta2eta(tau.init, .link, earg =.earg)
+ tau.init = rep(if (length( .itau )) .itau else tau.init,
+ len = n)
+ etastart = theta2eta(tau.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .itau = itau,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- tau = eta2theta(eta, .link, earg =.earg)
+ tau = eta2theta(eta, .link , earg = .earg )
pi / tan(pi * tau)
- }, list( .link=link, .earg =earg ))),
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$link <- c(tau = .link)
- misc$earg <- list(tau = .earg)
+ misc$earg <- list(tau = .earg )
misc$expected = TRUE
misc$imethod= .imethod
- }), list( .link=link, .earg =earg, .imethod = imethod ))),
+ }), list( .link = link, .earg = earg, .imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu,y, w,residuals= FALSE,eta, extra = NULL) {
- tau = eta2theta(eta, .link, earg =.earg)
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ tau = eta2theta(eta, .link , earg = .earg )
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(w * dnefghs(x=y, tau=tau, log = TRUE))
+ sum(c(w) * dnefghs(x = y, tau = tau, log = TRUE))
}
- }, list( .link=link, .earg =earg ))),
+ }, list( .link = link, .earg = earg ))),
vfamily = c("nefghs"),
deriv = eval(substitute(expression({
- tau = eta2theta(eta, .link, earg =.earg)
+ tau = eta2theta(eta, .link , earg = .earg )
dl.dtau = pi / tan(pi * tau) - y
- dtau.deta = dtheta.deta(tau, .link, earg =.earg)
+ dtau.deta = dtheta.deta(tau, .link , earg = .earg )
w * dl.dtau * dtau.deta
- }), list( .link=link, .earg =earg ))),
+ }), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
d2l.dtau2 = (pi / sin(pi * tau))^2
wz = d2l.dtau2 * dtau.deta^2
@@ -1016,21 +1620,21 @@ dnefghs = function(x, tau, log = FALSE) {
-dlogF = function(x, shape1, shape2, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dlogF <- function(x, shape1, shape2, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- logdensity = -shape2*x - lbeta(shape1, shape2) -
- (shape1 + shape2) * log1p(exp(-x))
- if (log.arg) logdensity else exp(logdensity)
+
+ logdensity = -shape2*x - lbeta(shape1, shape2) -
+ (shape1 + shape2) * log1p(exp(-x))
+ if (log.arg) logdensity else exp(logdensity)
}
- logF = function(lshape1 = "loge", lshape2 = "loge",
- eshape1 = list(), eshape2 = list(),
+ logF <- function(lshape1 = "loge", lshape2 = "loge",
ishape1 = NULL, ishape2 = 1,
imethod = 1)
{
@@ -1041,12 +1645,17 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
!is.Numeric(ishape2, positive = TRUE))
stop("argument 'ishape2' must be positive")
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 = as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 = as.character(substitute(lshape2))
- if (!is.list(eshape1)) eshape1 = list()
- if (!is.list(eshape2)) eshape2 = list()
+
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
@@ -1057,18 +1666,26 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
"f(y) = exp(-shape2*y)/(beta(shape1,shape2)*",
"(1+exp(-y))^(shape1+shape2))\n\n",
"Link: ",
- namesof("shape1", lshape1, earg =eshape1),
- ", ",
- namesof("shape2", lshape2, earg =eshape2),
- "\n\n",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), "\n\n",
"Mean: digamma(shape1) - digamma(shape2)"),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
predictors.names = c(
namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE))
+
if (!length(etastart)) {
wmeany = if ( .imethod == 1) weighted.mean(y, w) else
median(rep(y, w))
@@ -1092,8 +1709,8 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
.ishape1 = ishape1, .ishape2 = ishape2,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
digamma(shape1) - digamma(shape2)
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
@@ -1106,19 +1723,20 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
.eshape1 = eshape1, .eshape2 = eshape2,
.imethod = imethod ))),
loglikelihood = eval(substitute(
- function(mu,y, w,residuals= FALSE,eta, extra = NULL) {
- shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ function(mu,y, w, residuals = FALSE,eta, extra = NULL) {
+ shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(w * dlogF(x=y, shape1 = shape1, shape2 = shape2, log = TRUE))
+ sum(c(w) * dlogF(x = y, shape1 = shape1,
+ shape2 = shape2, log = TRUE))
}
}, list( .lshape1 = lshape1, .lshape2 = lshape2,
.eshape1 = eshape1, .eshape2 = eshape2 ))),
vfamily = c("logF"),
deriv = eval(substitute(expression({
- shape1 = eta2theta(eta[,1], .lshape1, earg = .eshape1)
- shape2 = eta2theta(eta[,2], .lshape2, earg = .eshape2)
+ shape1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
+ shape2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
tmp888 = digamma(shape1 + shape2) - log1p(exp(-y))
dl.dshape1 = tmp888 - digamma(shape1)
dl.dshape2 = tmp888 - digamma(shape2) - y
@@ -1134,9 +1752,9 @@ dlogF = function(x, shape1, shape2, log = FALSE) {
d2l.dshape22 = trigamma(shape2) - tmp888
d2l.dshape1shape2 = -tmp888
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M = M)] = d2l.dshape12 * dshape1.deta^2
- wz[,iam(2,2,M = M)] = d2l.dshape22 * dshape2.deta^2
- wz[,iam(1,2,M = M)] = d2l.dshape1shape2 * dshape1.deta *
+ wz[,iam(1, 1, M = M)] = d2l.dshape12 * dshape1.deta^2
+ wz[,iam(2, 2, M = M)] = d2l.dshape22 * dshape2.deta^2
+ wz[,iam(1, 2, M = M)] = d2l.dshape1shape2 * dshape1.deta *
dshape2.deta
c(w) * wz
}), list( .lshape1 = lshape1, .lshape2 = lshape2,
@@ -1154,7 +1772,12 @@ dbenf <- function(x, ndigits = 1, log = FALSE) {
stop("argument 'ndigits' must be 1 or 2")
lowerlimit <- ifelse(ndigits == 1, 1, 10)
upperlimit <- ifelse(ndigits == 1, 9, 99)
- log.arg <- log; rm(log)
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
ans <- x * NA
indexTF <- is.finite(x) & (x >= lowerlimit)
@@ -1220,7 +1843,7 @@ qbenf <- function(p, ndigits = 1) {
upperlimit <- ifelse(ndigits == 1, 9, 99)
bad <- !is.na(p) & !is.nan(p) & ((p < 0) | (p > 1))
if (any(bad))
- stop("bad input for 'p'")
+ stop("bad input for argument 'p'")
ans <- rep(lowerlimit, length = length(p))
for(ii in (lowerlimit+1):upperlimit) {
diff --git a/R/family.basics.R b/R/family.basics.R
index f288285..4dd751a 100644
--- a/R/family.basics.R
+++ b/R/family.basics.R
@@ -363,7 +363,8 @@ add.constraints <- function(constraints, new.constraints,
-iam <- function(j, k, M, hbw = M, both = FALSE, diag = TRUE) {
+ iam <- function(j, k, M, # hbw = M,
+ both = FALSE, diag = TRUE) {
jay <- j
@@ -709,16 +710,13 @@ setMethod("weights", "vglm",
dotFortran <- function(name, ..., NAOK = FALSE, DUP = TRUE,
PACKAGE = "VGAM") {
- if (is.R()) {
- .Fortran(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
- } else {
- stop()
- }
+ .Fortran(name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
}
-dotC <- function(name, ..., NAOK = FALSE, DUP = TRUE, PACKAGE = "VGAM") {
- .C(name=name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
+dotC <- function(name, ..., NAOK = FALSE, DUP = TRUE,
+ PACKAGE = "VGAM") {
+ .C(name, ..., NAOK = NAOK, DUP = DUP, PACKAGE = PACKAGE)
}
@@ -957,8 +955,179 @@ is.empty.list = function(mylist) {
+interleave.VGAM = function(L, M) c(matrix(1:L, nrow = M, byrow = TRUE))
+
+
+
+
+
+w.wz.merge <- function(w, wz, n, M, ndepy,
+ intercept.only = FALSE) {
+
+
+
+
+
+ wz <- as.matrix(wz)
+
+ if (ndepy == 1)
+ return( c(w) * wz)
+
+
+ if (intercept.only)
+ warning("yettodo: support intercept.only == TRUE")
+
+ if (ncol(as.matrix(w)) > ndepy)
+ stop("number of columns of 'w' exceeds number of responses")
+
+ w <- matrix(w, n, ndepy)
+ w.rep <- matrix(0, n, ncol(wz))
+ Musual <- M / ndepy
+ all.indices = iam(NA, NA, M = M, both = TRUE)
+
+
+
+ if (FALSE)
+ for (ii in 1:ncol(wz)) {
+
+ if ((ind1 <- ceiling(all.indices$row[ii] / Musual)) ==
+ ceiling(all.indices$col[ii] / Musual)) {
+ w.rep[, ii] <- w[, ind1]
+ }
+
+
+ } # ii
+
+
+ res.Ind1 <- ceiling(all.indices$row.index / Musual)
+ Ind1 <- res.Ind1 == ceiling(all.indices$col.index / Musual)
+
+ LLLL <- min(ncol(wz), length(Ind1))
+ Ind1 <- Ind1[1:LLLL]
+ res.Ind1 <- res.Ind1[1:LLLL]
+
+ for (ii in 1:ndepy) {
+ sub.ind1 <- (1:LLLL)[Ind1 & (res.Ind1 == ii)]
+ w.rep[, sub.ind1] <- w[, ii]
+ } # ii
+
+ w.rep * wz
+}
+
+w.y.check <- function(w, y,
+ ncol.w.max = 1, ncol.y.max = 1,
+ ncol.w.min = 1, ncol.y.min = 1,
+ out.wy = FALSE,
+ colsyperw = 1,
+ maximize = FALSE,
+ Is.integer.y = FALSE,
+ Is.positive.y = FALSE,
+ Is.nonnegative.y = FALSE,
+ prefix.w = "PriorWeight",
+ prefix.y = "Response") {
+
+
+
+ if (!is.matrix(w))
+ w <- as.matrix(w)
+ if (!is.matrix(y))
+ y <- as.matrix(y)
+ n_lm <- nrow(y)
+ rn.w <- rownames(w)
+ rn.y <- rownames(y)
+ cn.w <- colnames(w)
+ cn.y <- colnames(y)
+
+
+ if (Is.integer.y && any(y != round(y)))
+ stop("response variable 'y' must be integer-valued")
+ if (Is.positive.y && any(y <= 0))
+ stop("response variable 'y' must be positive-valued")
+ if (Is.nonnegative.y && any(y < 0))
+ stop("response variable 'y' must be 0 or positive-valued")
+
+ if (nrow(w) != n_lm)
+ stop("nrow(w) should be equal to nrow(y)")
+
+ if (ncol(w) > ncol.w.max)
+ stop("prior-weight variable 'w' has too many columns")
+ if (ncol(y) > ncol.y.max)
+ stop("response variable 'y' has too many columns; ",
+ "only ", ncol.y.max, " allowed")
+
+ if (ncol(w) < ncol.w.min)
+ stop("prior-weight variable 'w' has too few columns")
+ if (ncol(y) < ncol.y.min)
+ stop("response variable 'y' has too few columns; ",
+ "at least ", ncol.y.max, " needed")
+
+ if (min(w) <= 0)
+ stop("prior-weight variable 'w' must contain positive values only")
+
+ if (is.numeric(colsyperw) && ncol(y) %% colsyperw != 0)
+ stop("number of columns of the response variable 'y' is not ",
+ "a multiple of ", colsyperw)
+
+
+ if (maximize) {
+ Ncol.max.w = max(ncol(w), ncol(y) / colsyperw)
+ Ncol.max.y = max(ncol(y), ncol(w) * colsyperw)
+ } else {
+ Ncol.max.w = ncol(w)
+ Ncol.max.y = ncol(y)
+ }
+
+ if (out.wy && ncol(w) < Ncol.max.w) {
+ nblanks <- sum(cn.w == "")
+ if (nblanks > 0)
+ cn.w[cn.w == ""] <- paste(prefix.w, 1:nblanks, sep = "")
+ if (length(cn.w) < Ncol.max.w)
+ cn.w <- c(cn.w, paste(prefix.w, (length(cn.w)+1):Ncol.max.w,
+ sep = ""))
+ w <- matrix(w, n_lm, Ncol.max.w, dimnames = list(rn.w, cn.w))
+ }
+ if (out.wy && ncol(y) < Ncol.max.y) {
+ nblanks <- sum(cn.y == "")
+ if (nblanks > 0)
+ cn.y[cn.y == ""] <- paste(prefix.y, 1:nblanks, sep = "")
+ if (length(cn.y) < Ncol.max.y)
+ cn.y <- c(cn.y, paste(prefix.y, (length(cn.y)+1):Ncol.max.y,
+ sep = ""))
+ y <- matrix(y, n_lm, Ncol.max.y, dimnames = list(rn.y, cn.y))
+ }
+
+ list(w = if (out.wy) w else NULL,
+ y = if (out.wy) y else NULL)
+}
+
+
+
+
+vweighted.mean.default <- function (x, w, ..., na.rm = FALSE) {
+ temp5 <- w.y.check(w = w, y = x, ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE,
+ Is.integer.y = FALSE,
+ Is.positive.y = FALSE,
+ Is.nonnegative.y = FALSE,
+ prefix.w = "PriorWeight",
+ prefix.y = "Response")
+
+ x <- temp5$y
+ w <- temp5$w
+
+ ans <- numeric(ncol(w))
+ for (ii in 1:ncol(w))
+ ans[ii] <- weighted.mean(x[, ii], w = w[, ii], ..., na.rm = na.rm)
+ ans
+}
+
+
+
+
diff --git a/R/family.binomial.R b/R/family.binomial.R
index f6d9b21..a347537 100644
--- a/R/family.binomial.R
+++ b/R/family.binomial.R
@@ -17,48 +17,49 @@ process.binomial2.data.vgam <- expression({
- if (!all(w == 1))
- extra$orig.w = w
-
-
- if (!is.matrix(y)) {
- yf <- as.factor(y)
- lev <- levels(yf)
- llev <- length(lev)
- if (llev != 4)
- stop("response must have 4 levels")
- nn <- length(yf)
- y <- matrix(0, nn, llev)
- y[cbind(1:nn, as.vector(unclass(yf)))] <- 1
- colnamesy <- paste(lev, ":", c("00", "01", "10", "11"), sep = "")
- dimnames(y) <- list(names(yf), colnamesy)
- input.type <- 1
- } else if (ncol(y) == 2) {
- if (!all(y == 0 | y == 1))
- stop("response must contains 0's and 1's only")
- col.index <- y[,2] + 2*y[,1] + 1 # 1:4
- nn <- nrow(y)
- y <- matrix(0, nn, 4)
- y[cbind(1:nn, col.index)] <- 1
- dimnames(y) <- list(dimnames(y)[[1]], c("00", "01", "10", "11"))
- input.type <- 2
- } else if (ncol(y) == 4) {
- input.type <- 3
- } else
- stop("response unrecognized")
-
-
-
- nvec <- rowSums(y)
-
- w <- w * nvec
- y <- y / nvec # Convert to proportions
-
- if (length(mustart) + length(etastart) == 0) {
- mu <- y + (1 / ncol(y) - y) / nvec
- dimnames(mu) <- dimnames(y)
- mustart <- mu
- }
+ if (!all(w == 1))
+ extra$orig.w = w
+
+
+ if (!is.matrix(y)) {
+ yf <- as.factor(y)
+ lev <- levels(yf)
+ llev <- length(lev)
+ if (llev != 4)
+ stop("response must have 4 levels")
+ nn <- length(yf)
+ y <- matrix(0, nn, llev)
+ y[cbind(1:nn, as.vector(unclass(yf)))] <- 1
+ colnamesy <- paste(lev, ":", c("00", "01", "10", "11"), sep = "")
+ dimnames(y) <- list(names(yf), colnamesy)
+ input.type <- 1
+ } else if (ncol(y) == 2) {
+ if (!all(y == 0 | y == 1))
+ stop("response must contains 0's and 1's only")
+ col.index <- y[, 2] + 2*y[, 1] + 1 # 1:4
+ nn <- nrow(y)
+ y <- matrix(0, nn, 4)
+ y[cbind(1:nn, col.index)] <- 1
+ dimnames(y) <- list(dimnames(y)[[1]],
+ c("00", "01", "10", "11"))
+ input.type <- 2
+ } else if (ncol(y) == 4) {
+ input.type <- 3
+ } else
+ stop("response unrecognized")
+
+
+
+ nvec <- rowSums(y)
+
+ w <- w * nvec
+ y <- y / nvec # Convert to proportions
+
+ if (length(mustart) + length(etastart) == 0) {
+ mu <- y + (1 / ncol(y) - y) / nvec
+ dimnames(mu) <- dimnames(y)
+ mustart <- mu
+ }
})
@@ -66,25 +67,26 @@ process.binomial2.data.vgam <- expression({
-betabinomial.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+betabinomial.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
betabinomial <- function(lmu = "logit", lrho = "logit",
- emu = list(), erho = list(), irho = NULL,
+ irho = NULL,
imethod = 1, shrinkage.init = 0.95,
nsimEIM = NULL, zero = 2)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu <- as.character(substitute(lmu))
- if (mode(lrho) != "character" && mode(lrho) != "name")
- lrho <- as.character(substitute(lrho))
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
+
- if (!is.list(emu )) emu <- list()
- if (!is.list(erho)) erho <- list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -136,7 +138,7 @@ betabinomial.control <- function(save.weight = TRUE, ...)
predictors.names <- c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
namesof("rho", .lrho, earg = .erho, tag = FALSE))
if (!length(etastart)) {
- betabinomial.Loglikfun = function(rhoval, y, x, w, extraargs) {
+ betabinomial.Loglikfun <- function(rhoval, y, x, w, extraargs) {
shape1 <- extraargs$mustart * (1-rhoval) / rhoval
shape2 <- (1-extraargs$mustart) * (1-rhoval) / rhoval
ycounts <- extraargs$ycounts # Ought to be integer-valued
@@ -179,7 +181,7 @@ betabinomial.control <- function(save.weight = TRUE, ...)
.imethod = imethod, .sinit = shrinkage.init,
.nsimEIM = nsimEIM, .irho = irho ))),
linkinv = eval(substitute(function(eta, extra = NULL)
- eta2theta(eta[,1], .lmu, earg = .emu),
+ eta2theta(eta[, 1], .lmu, earg = .emu),
list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
misc$link <- c(mu = .lmu, rho = .lrho)
@@ -196,8 +198,8 @@ betabinomial.control <- function(save.weight = TRUE, ...)
ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- mymu <- eta2theta(eta[,1], .lmu, earg = .emu)
- rho <- eta2theta(eta[,2], .lrho, earg = .erho)
+ mymu <- eta2theta(eta[, 1], .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], .lrho, earg = .erho)
smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
@@ -217,7 +219,7 @@ betabinomial.control <- function(save.weight = TRUE, ...)
} else {
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dbetabinom.ab(x = ycounts, size = nvec, shape1 = shape1,
- shape2 = shape2, log = TRUE ))
+ shape2 = shape2, log = TRUE ))
}
}, list( .lmu = lmu, .lrho = lrho,
.emu = emu, .erho = erho ))),
@@ -229,8 +231,8 @@ betabinomial.control <- function(save.weight = TRUE, ...)
y * w # Convert proportions to counts
ycounts <- round(ycounts)
- mymu <- eta2theta(eta[,1], .lmu, earg = .emu)
- rho <- eta2theta(eta[,2], .lrho, earg = .erho)
+ mymu <- eta2theta(eta[, 1], .lmu, earg = .emu)
+ rho <- eta2theta(eta[, 2], .lrho, earg = .erho)
smallno <- 100 * .Machine$double.eps
rho <- pmax(rho, smallno)
rho <- pmin(rho, 1-smallno)
@@ -269,13 +271,13 @@ betabinomial.control <- function(save.weight = TRUE, ...)
trigamma(shape2) + trigamma(shape1+shape2))
wz21 <- -(trigamma(shape1+shape2) - trigamma(shape1+shape2+nvec))
- wz[,iam(1, 1, M)] <- dmu.deta^2 * (wz11 * dshape1.dmu^2 +
+ wz[, iam(1, 1, M)] <- dmu.deta^2 * (wz11 * dshape1.dmu^2 +
wz22 * dshape2.dmu^2 +
2 * wz21 * dshape1.dmu * dshape2.dmu)
- wz[,iam(2, 2, M)] <- drho.deta^2 * (wz11 * dshape1.drho^2 +
+ wz[, iam(2, 2, M)] <- drho.deta^2 * (wz11 * dshape1.drho^2 +
wz22 * dshape2.drho^2 +
2 * wz21 * dshape1.drho * dshape2.drho)
- wz[,iam(2, 1, M)] <- dmu.deta * drho.deta *
+ wz[, iam(2, 1, M)] <- dmu.deta * drho.deta *
(dshape1.dmu*(wz11*dshape1.drho + wz21*dshape2.drho) +
dshape2.dmu*(wz21*dshape1.drho + wz22*dshape2.drho))
@@ -286,21 +288,21 @@ betabinomial.control <- function(save.weight = TRUE, ...)
dthetas.detas <- cbind(dmu.deta, drho.deta)
for (ii in 1:( .nsimEIM )) {
- ysim <- rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
- shape2 = shape2)
- dl.dmu <- dshape1.dmu * (digamma(shape1+ysim) -
- digamma(shape2+nvec-ysim) -
- digamma(shape1) + digamma(shape2))
- dl.drho <- (-1/rho^2) * (mymu * digamma(shape1+ysim) +
- (1-mymu) * digamma(shape2+nvec-ysim) -
- digamma(shape1+shape2+nvec) -
- mymu * digamma(shape1) -
- (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
-
-
- temp3 <- cbind(dl.dmu, dl.drho) # n x M matrix
- run.varcov <- run.varcov +
- temp3[,ind1$row.index] * temp3[,ind1$col.index]
+ ysim <- rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
+ shape2 = shape2)
+ dl.dmu <- dshape1.dmu * (digamma(shape1+ysim) -
+ digamma(shape2+nvec-ysim) -
+ digamma(shape1) + digamma(shape2))
+ dl.drho <- (-1/rho^2) * (mymu * digamma(shape1+ysim) +
+ (1-mymu) * digamma(shape2+nvec-ysim) -
+ digamma(shape1+shape2+nvec) -
+ mymu * digamma(shape1) -
+ (1-mymu)*digamma(shape2) + digamma(shape1+shape2))
+
+
+ temp3 <- cbind(dl.dmu, dl.drho) # n x M matrix
+ run.varcov <- run.varcov +
+ temp3[, ind1$row.index] * temp3[, ind1$col.index]
}
run.varcov <- run.varcov / .nsimEIM
@@ -309,7 +311,8 @@ betabinomial.control <- function(save.weight = TRUE, ...)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz <- wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz <- wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
}
}), list( .lmu = lmu, .lrho = lrho,
@@ -322,7 +325,7 @@ betabinomial.control <- function(save.weight = TRUE, ...)
-dbinom2.or = function(mu1,
+dbinom2.or <- function(mu1,
mu2 = if (exchangeable) mu1 else
stop("'mu2' not specified"),
oratio = 1,
@@ -365,14 +368,14 @@ dbinom2.or = function(mu1,
-rbinom2.or = function(n, mu1,
+rbinom2.or <- function(n, mu1,
mu2 = if (exchangeable) mu1 else
stop("argument 'mu2' not specified"),
oratio = 1,
exchangeable = FALSE,
tol = 0.001,
twoCols = TRUE,
- colnames = if (twoCols) c("y1","y2") else
+ colnames = if (twoCols) c("y1", "y2") else
c("00", "01", "10", "11"),
ErrorCheck = TRUE)
{
@@ -401,12 +404,12 @@ rbinom2.or = function(n, mu1,
dimnames = list(NULL,
if (twoCols) colnames else NULL))
yy = runif(n)
- cs1 = dmat[,"00"] + dmat[,"01"]
- cs2 = cs1 + dmat[,"10"]
- index = (dmat[,"00"] < yy) & (yy <= cs1)
- answer[index,2] = 1
+ cs1 = dmat[, "00"] + dmat[, "01"]
+ cs2 = cs1 + dmat[, "10"]
+ index = (dmat[, "00"] < yy) & (yy <= cs1)
+ answer[index, 2] = 1
index = (cs1 < yy) & (yy <= cs2)
- answer[index,1] = 1
+ answer[index, 1] = 1
index = (yy > cs2)
answer[index,] = 1
if (twoCols) answer else {
@@ -419,21 +422,34 @@ rbinom2.or = function(n, mu1,
- binom2.or = function(lmu = "logit", lmu1 = lmu, lmu2 = lmu,
- loratio = "loge",
- emu = list(), emu1 = emu, emu2 = emu, eoratio = list(),
- imu1 = NULL, imu2 = NULL, ioratio = NULL,
- zero = 3, exchangeable = FALSE, tol = 0.001,
- morerobust = FALSE)
+ binom2.or <- function(lmu = "logit", lmu1 = lmu, lmu2 = lmu,
+ loratio = "loge",
+ imu1 = NULL, imu2 = NULL, ioratio = NULL,
+ zero = 3, exchangeable = FALSE, tol = 0.001,
+ morerobust = FALSE)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lmu1) != "character" && mode(lmu1) != "name")
- lmu1 = as.character(substitute(lmu1))
- if (mode(lmu2) != "character" && mode(lmu2) != "name")
- lmu2 = as.character(substitute(lmu2))
- if (mode(loratio) != "character" && mode(loratio) != "name")
- loratio = as.character(substitute(loratio))
+
+ lmu1 <- lmu1
+ lmu2 <- lmu2
+
+
+ lmu1 <- as.list(substitute(lmu1))
+ emu1 <- link2list(lmu1)
+ lmu1 <- attr(emu1, "function.name")
+
+ lmu2 <- as.list(substitute(lmu2))
+ emu2 <- link2list(lmu2)
+ lmu2 <- attr(emu2, "function.name")
+
+
+
+
+
+ loratio <- as.list(substitute(loratio))
+ eoratio <- link2list(loratio)
+ loratio <- attr(eoratio, "function.name")
+
+
if (is.logical(exchangeable) && exchangeable && ((lmu1 != lmu2) ||
!all.equal(emu1, emu2)))
@@ -442,23 +458,19 @@ rbinom2.or = function(n, mu1,
tol > 0.1)
stop("bad input for argument 'tol'")
- if (!is.list(emu1)) emu1 = list()
- if (!is.list(emu2)) emu2 = list()
- if (!is.list(eoratio)) eoratio = list()
-
- new("vglmff",
- blurb = c("Bivariate binomial regression with an odds ratio\n",
- "Links: ",
- namesof("mu1", lmu1, earg=emu1), ", ",
- namesof("mu2", lmu2, earg=emu2), "; ",
- namesof("oratio", loratio, earg=eoratio)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1,1,0,0,0,1),3,2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .exchangeable = exchangeable, .zero = zero ))),
+ new("vglmff",
+ blurb = c("Bivariate binomial regression with an odds ratio\n",
+ "Links: ",
+ namesof("mu1", lmu1, earg = emu1), ", ",
+ namesof("mu2", lmu2, earg = emu2), "; ",
+ namesof("oratio", loratio, earg = eoratio)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(c(1, 1,0,0,0, 1), 3, 2), x,
+ .exchangeable, constraints,
+ intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
mustart.orig = mustart
@@ -466,11 +478,13 @@ rbinom2.or = function(n, mu1,
if (length(mustart.orig))
mustart = mustart.orig # Retain it if inputted
+
predictors.names =
c(namesof("mu1", .lmu1, earg = .emu1, short = TRUE),
namesof("mu2", .lmu2, earg = .emu2, short = TRUE),
namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
+
if (!length(etastart)) {
pmargin = cbind(mustart[, 3] + mustart[, 4],
mustart[, 2] + mustart[, 4])
@@ -479,139 +493,141 @@ rbinom2.or = function(n, mu1,
mustart[, 3])
if (length( .imu1 )) pmargin[, 1] = .imu1
if (length( .imu2 )) pmargin[, 2] = .imu2
- etastart = cbind(theta2eta(pmargin[,1], .lmu1, earg = .emu1),
- theta2eta(pmargin[,2], .lmu2, earg = .emu2),
+ etastart = cbind(theta2eta(pmargin[, 1], .lmu1, earg = .emu1),
+ theta2eta(pmargin[, 2], .lmu2, earg = .emu2),
theta2eta(ioratio, .loratio, earg = .eoratio))
}
}), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
.emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
.imu1 = imu1, .imu2 = imu2, .ioratio = ioratio ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[,1], .lmu1, earg = .emu1),
- eta2theta(eta[,2], .lmu2, earg = .emu2))
- oratio = eta2theta(eta[,3], .loratio, earg = .eoratio)
- a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
- temp = sqrt(a.temp^2 + b.temp)
- pj4 = ifelse(abs(oratio-1) < .tol, pmargin[,1]*pmargin[,2],
- (a.temp-temp)/(2*(oratio-1)))
- pj2 = pmargin[,2] - pj4
- pj3 = pmargin[,1] - pj4
- cbind("00" = 1-pj4-pj2-pj3,
- "01" = pj2,
- "10" = pj3,
- "11" = pj4)
- }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
- .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
- .tol = tol ))),
- last = eval(substitute(expression({
- misc$link = c("mu1"= .lmu1, "mu2"= .lmu2, "oratio"= .loratio)
- misc$earg = list(mu1 = .emu1, mu2 = .emu2, oratio = .eoratio)
- misc$tol = .tol
- misc$expected = TRUE
- }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
- .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
- .tol = tol ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- pmargin = cbind(mu[,3]+mu[,4], mu[,2]+mu[,4])
- oratio = mu[,4]*mu[,1] / (mu[,2]*mu[,3])
- cbind(theta2eta(pmargin[,1], .lmu1, earg = .emu1),
- theta2eta(pmargin[,2], .lmu2, earg = .emu2),
- theta2eta(oratio, .loratio, earg = .eoratio))
- }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
- .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- if ( .morerobust) {
- vsmallno = 1.0e4 * .Machine$double.xmin
- mu[mu < vsmallno] = vsmallno
- }
+ pmargin = cbind(eta2theta(eta[, 1], .lmu1, earg = .emu1),
+ eta2theta(eta[, 2], .lmu2, earg = .emu2))
+ oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
+ a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp = sqrt(a.temp^2 + b.temp)
+ pj4 = ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
+ (a.temp-temp)/(2*(oratio-1)))
+ pj2 = pmargin[, 2] - pj4
+ pj3 = pmargin[, 1] - pj4
+ cbind("00" = 1-pj4-pj2-pj3,
+ "01" = pj2,
+ "10" = pj3,
+ "11" = pj4)
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+ .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
+ .tol = tol ))),
+ last = eval(substitute(expression({
+ misc$link = c(mu1 = .lmu1 , mu2 = .lmu2 , oratio = .loratio)
+ misc$earg = list(mu1 = .emu1 , mu2 = .emu2 , oratio = .eoratio)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ misc$tol = .tol
+ misc$expected = TRUE
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+ .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio,
+ .tol = tol ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ pmargin = cbind(mu[, 3]+mu[, 4], mu[, 2]+mu[, 4])
+ oratio = mu[, 4]*mu[, 1] / (mu[, 2]*mu[, 3])
+ cbind(theta2eta(pmargin[, 1], .lmu1 , earg = .emu1),
+ theta2eta(pmargin[, 2], .lmu2 , earg = .emu2),
+ theta2eta(oratio, .loratio, earg = .eoratio))
+ }, list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+ .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ if ( .morerobust) {
+ vsmallno = 1.0e4 * .Machine$double.xmin
+ mu[mu < vsmallno] = vsmallno
+ }
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
- }
- }, list( .morerobust = morerobust ))),
- vfamily = c("binom2.or", "binom2"),
- deriv = eval(substitute(expression({
- smallno = 1.0e4 * .Machine$double.eps
- mu.use = mu
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1 - smallno] = 1 - smallno
- pmargin = cbind(mu.use[, 3] + mu.use[, 4],
- mu.use[, 2] + mu.use[, 4])
- pmargin[, 1] = pmax( smallno, pmargin[, 1])
- pmargin[, 1] = pmin(1 - smallno, pmargin[, 1])
- pmargin[, 2] = pmax( smallno, pmargin[, 2])
- pmargin[, 2] = pmin(1 - smallno, pmargin[, 2])
-
- oratio = mu.use[,4]*mu.use[,1] / (mu.use[,2]*mu.use[,3])
- use.oratio = pmax(smallno, oratio)
- a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
- temp9 = sqrt(a.temp^2 + b.temp)
-
- coeff12 = -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
- dl.dmu1 = coeff12[,2] * (y[,1]/mu.use[,1]-y[,3]/mu.use[,3]) -
- (1+coeff12[,2]) * (y[,2]/mu.use[,2]-y[,4]/mu.use[,4])
-
- dl.dmu2 = coeff12[,1] * (y[,1]/mu.use[,1]-y[,2]/mu.use[,2]) -
- (1+coeff12[,1]) * (y[,3]/mu.use[,3]-y[,4]/mu.use[,4])
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
+
+ }
+ }, list( .morerobust = morerobust ))),
+ vfamily = c("binom2.or", "binom2"),
+ deriv = eval(substitute(expression({
+ smallno = 1.0e4 * .Machine$double.eps
+ mu.use = mu
+ mu.use[mu.use < smallno] = smallno
+ mu.use[mu.use > 1 - smallno] = 1 - smallno
+ pmargin = cbind(mu.use[, 3] + mu.use[, 4],
+ mu.use[, 2] + mu.use[, 4])
+ pmargin[, 1] = pmax( smallno, pmargin[, 1])
+ pmargin[, 1] = pmin(1 - smallno, pmargin[, 1])
+ pmargin[, 2] = pmax( smallno, pmargin[, 2])
+ pmargin[, 2] = pmin(1 - smallno, pmargin[, 2])
+
+ oratio = mu.use[, 4]*mu.use[, 1] / (mu.use[, 2]*mu.use[, 3])
+ use.oratio = pmax(smallno, oratio)
+ a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp9 = sqrt(a.temp^2 + b.temp)
+
+ coeff12 = -0.5 + (2*oratio*pmargin - a.temp) / (2*temp9)
+ dl.dmu1 = coeff12[, 2] * (y[, 1]/mu.use[, 1]-y[, 3]/mu.use[, 3]) -
+ (1+coeff12[, 2]) * (y[, 2]/mu.use[, 2]-y[, 4]/mu.use[, 4])
- coeff3 = (y[,1]/mu.use[,1] - y[,2]/mu.use[,2] -
- y[,3]/mu.use[,3] + y[,4]/mu.use[,4])
- Vab = pmax(smallno, 1 / (1/mu.use[,1] + 1/mu.use[,2] +
- 1/mu.use[,3] + 1/mu.use[,4]))
- dp11.doratio = Vab / use.oratio
- dl.doratio = coeff3 * dp11.doratio
-
- c(w) * cbind(dl.dmu1 * dtheta.deta(pmargin[,1], .lmu1, earg = .emu1),
- dl.dmu2 * dtheta.deta(pmargin[,2], .lmu2, earg = .emu2),
- dl.doratio * dtheta.deta(oratio, .loratio, earg = .eoratio))
- }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
- .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
- weight = eval(substitute(expression({
- Deltapi = mu.use[,3]*mu.use[,2] - mu.use[,4]*mu.use[,1]
- myDelta = pmax(smallno, mu.use[,1] * mu.use[,2] *
- mu.use[,3] * mu.use[,4])
- pqmargin = pmargin * (1-pmargin)
- pqmargin[pqmargin < smallno] = smallno
-
- wz = matrix(0, n, 4)
- wz[,iam(1,1,M)] = (pqmargin[,2] * Vab / myDelta) *
- dtheta.deta(pmargin[,1], .lmu1, earg = .emu1)^2
- wz[,iam(2,2,M)] = (pqmargin[,1] * Vab / myDelta) *
- dtheta.deta(pmargin[,2], .lmu2, earg = .emu2)^2
- wz[,iam(3,3,M)] = (Vab / use.oratio^2) *
- dtheta.deta(use.oratio, .loratio, earg = .eoratio)^2
- wz[,iam(1,2,M)] = (Vab * Deltapi / myDelta) *
- dtheta.deta(pmargin[,1], .lmu1, earg = .emu1) *
- dtheta.deta(pmargin[,2], .lmu2, earg = .emu2)
- c(w) * wz
- }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
- .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))))
+ dl.dmu2 = coeff12[, 1] * (y[, 1]/mu.use[, 1]-y[, 2]/mu.use[, 2]) -
+ (1+coeff12[, 1]) * (y[, 3]/mu.use[, 3]-y[, 4]/mu.use[, 4])
+
+ coeff3 = (y[, 1]/mu.use[, 1] - y[, 2]/mu.use[, 2] -
+ y[, 3]/mu.use[, 3] + y[, 4]/mu.use[, 4])
+ Vab = pmax(smallno, 1 / (1/mu.use[, 1] + 1/mu.use[, 2] +
+ 1/mu.use[, 3] + 1/mu.use[, 4]))
+ dp11.doratio = Vab / use.oratio
+ dl.doratio = coeff3 * dp11.doratio
+
+ c(w) * cbind(dl.dmu1 * dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1),
+ dl.dmu2 * dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2),
+ dl.doratio * dtheta.deta(oratio, .loratio, earg = .eoratio))
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+ .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))),
+ weight = eval(substitute(expression({
+ Deltapi = mu.use[, 3]*mu.use[, 2] - mu.use[, 4]*mu.use[, 1]
+ myDelta = pmax(smallno, mu.use[, 1] * mu.use[, 2] *
+ mu.use[, 3] * mu.use[, 4])
+ pqmargin = pmargin * (1-pmargin)
+ pqmargin[pqmargin < smallno] = smallno
+
+ wz = matrix(0, n, 4)
+ wz[, iam(1, 1, M)] = (pqmargin[, 2] * Vab / myDelta) *
+ dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1)^2
+ wz[, iam(2, 2, M)] = (pqmargin[, 1] * Vab / myDelta) *
+ dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2)^2
+ wz[, iam(3, 3, M)] = (Vab / use.oratio^2) *
+ dtheta.deta(use.oratio, .loratio, earg = .eoratio)^2
+ wz[, iam(1, 2, M)] = (Vab * Deltapi / myDelta) *
+ dtheta.deta(pmargin[, 1], .lmu1, earg = .emu1) *
+ dtheta.deta(pmargin[, 2], .lmu2, earg = .emu2)
+ c(w) * wz
+ }), list( .lmu1 = lmu1, .lmu2 = lmu2, .loratio = loratio,
+ .emu1 = emu1, .emu2 = emu2, .eoratio = eoratio ))))
}
-dbinom2.rho = function(mu1,
- mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
- rho=0,
- exchangeable = FALSE,
- colnames = c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
+dbinom2.rho =
+ function(mu1,
+ mu2 = if (exchangeable) mu1 else stop("'mu2' not specified"),
+ rho = 0,
+ exchangeable = FALSE,
+ colnames = c("00", "01", "10", "11"),
+ ErrorCheck = TRUE)
{
if (ErrorCheck) {
if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
@@ -640,28 +656,33 @@ dbinom2.rho = function(mu1,
-rbinom2.rho = function(n, mu1,
- mu2 = if (exchangeable) mu1 else
- stop("'mu2' not specified"),
- rho=0,
- exchangeable = FALSE,
- twoCols = TRUE,
- colnames = if (twoCols) c("y1","y2") else
- c("00", "01", "10", "11"),
- ErrorCheck = TRUE)
+rbinom2.rho =
+ function(n, mu1,
+ mu2 = if (exchangeable) mu1 else
+ stop("argument 'mu2' not specified"),
+ rho = 0,
+ exchangeable = FALSE,
+ twoCols = TRUE,
+ colnames = if (twoCols) c("y1", "y2") else
+ c("00", "01", "10", "11"),
+ ErrorCheck = TRUE)
{
if (ErrorCheck) {
if (!is.Numeric(n, integer.valued = TRUE,
positive = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
- if (!is.Numeric(mu1, positive = TRUE) || max(mu1) >= 1)
+ if (!is.Numeric(mu1, positive = TRUE) ||
+ max(mu1) >= 1)
stop("bad input for argument 'mu1'")
- if (!is.Numeric(mu2, positive = TRUE) || max(mu2) >= 1)
+ if (!is.Numeric(mu2, positive = TRUE) ||
+ max(mu2) >= 1)
stop("bad input for argument 'mu2'")
- if (!is.Numeric(rho) || min(rho) <= -1 || max(rho) >= 1)
+ if (!is.Numeric(rho) || min(rho) <= -1 ||
+ max(rho) >= 1)
stop("bad input for argument 'rho'")
- if (exchangeable && max(abs(mu1 - mu2)) > 0.00001)
- stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
+ if (exchangeable &&
+ max(abs(mu1 - mu2)) > 0.00001)
+ stop("argument 'exchangeable' is TRUE but 'mu1' and 'mu2' differ")
}
dmat = dbinom2.rho(mu1 = mu1, mu2 = mu2, rho = rho,
@@ -672,17 +693,17 @@ rbinom2.rho = function(n, mu1,
dimnames = list(NULL,
if (twoCols) colnames else NULL))
yy = runif(n)
- cs1 = dmat[,"00"] + dmat[,"01"]
- cs2 = cs1 + dmat[,"10"]
- index = (dmat[,"00"] < yy) & (yy <= cs1)
- answer[index,2] = 1
+ cs1 = dmat[, "00"] + dmat[, "01"]
+ cs2 = cs1 + dmat[, "10"]
+ index = (dmat[, "00"] < yy) & (yy <= cs1)
+ answer[index, 2] = 1
index = (cs1 < yy) & (yy <= cs2)
- answer[index,1] = 1
+ answer[index, 1] = 1
index = (yy > cs2)
answer[index,] = 1
if (twoCols) answer else {
answer4 = matrix(0, n, 4, dimnames = list(NULL, colnames))
- answer4[cbind(1:n, 1 + 2*answer[,1] + answer[,2])] = 1
+ answer4[cbind(1:n, 1 + 2*answer[, 1] + answer[, 2])] = 1
answer4
}
}
@@ -698,276 +719,298 @@ binom2.rho.control <- function(save.weight = TRUE, ...)
- binom2.rho = function(lrho = "rhobit", erho = list(),
- imu1 = NULL, imu2 = NULL, irho = NULL,
- imethod = 1,
- zero = 3, exchangeable = FALSE,
- nsimEIM = NULL)
+ binom2.rho <- function(lrho = "rhobit",
+ lmu = "probit", # added 20120817
+ imu1 = NULL, imu2 = NULL, irho = NULL,
+ imethod = 1,
+ zero = 3, exchangeable = FALSE,
+ nsimEIM = NULL)
{
- if (mode(lrho) != "character" && mode(lrho) != "name")
- lrho = as.character(substitute(lrho))
- if (!is.list(erho)) erho = list()
- lmu12 = "probit"
- emu12 = list()
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
- if (is.Numeric(nsimEIM)) {
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE))
- stop("bad input for argument 'nsimEIM'")
- if (nsimEIM <= 100)
- warning("'nsimEIM' should be an integer greater than 100")
- }
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ if (lmu != "probit")
+ warning("argument 'lmu' should be 'probit'")
- new("vglmff",
- blurb = c("Bivariate probit model\n",
- "Links: ",
- namesof("mu1", lmu12, earg = emu12), ", ",
- namesof("mu2", lmu12, earg = emu12), ", ",
- namesof("rho", lrho, earg = erho)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
- .exchangeable, constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .exchangeable = exchangeable, .zero = zero ))),
- initialize = eval(substitute(expression({
- mustart.orig = mustart
- eval(process.binomial2.data.vgam)
+ lmu12 <- "probit" # But emu may contain some arguments.
+ emu12 <- emu # list()
- if (length(mustart.orig))
- mustart = mustart.orig # Retain it if inputted
- predictors.names = c(
- namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
- namesof("mu2", .lmu12, earg = .emu12, short = TRUE),
- namesof("rho", .lrho, earg = .erho, short = TRUE))
- if (is.null( .nsimEIM)) {
- save.weight <- control$save.weight <- FALSE
- }
+ if (is.Numeric(nsimEIM)) {
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("bad input for argument 'nsimEIM'")
+ if (nsimEIM <= 100)
+ warning("'nsimEIM' should be an integer greater than 100")
+ }
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- ycounts = if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
- if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
- warning("the response (as counts) does not appear to ",
- "be integer-valued. Am rounding to integer values.")
- ycounts = round(ycounts) # Make sure it is an integer
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- if (is.null(etastart)) {
- if (length(mustart.orig)) {
- mu1.init = mustart.orig[,3] + mustart.orig[,4]
- mu2.init = mustart.orig[,2] + mustart.orig[,4]
- } else if ( .imethod == 1) {
- glm1.fit = glm(cbind(ycounts[,3] + ycounts[,4],
- ycounts[,1] + ycounts[,2]) ~ x - 1,
- fam = binomial("probit"))
- glm2.fit = glm(cbind(ycounts[,2] + ycounts[,4],
- ycounts[,1] + ycounts[,3]) ~ x - 1,
- fam = binomial("probit"))
- mu1.init = fitted(glm1.fit)
- mu2.init = fitted(glm2.fit)
- } else if ( .imethod == 2) {
- mu1.init = if (is.Numeric( .imu1 ))
- rep( .imu1 , length = n) else
- mu[,3] + mu[,4]
- mu2.init = if (is.Numeric( .imu2 ))
- rep( .imu2 , length = n) else
- mu[,2] + mu[,4]
- } else {
- stop("bad value for argument 'imethod'")
- }
-
-
-
- binom2.rho.Loglikfun = function(rhoval, y, x, w, extraargs) {
- init.mu1 = extraargs$initmu1
- init.mu2 = extraargs$initmu2
- ycounts = extraargs$ycounts
- nvec = extraargs$nvec
- eta1 = qnorm(init.mu1)
- eta2 = qnorm(init.mu2)
- p11 = pnorm2(eta1, eta2, rhoval)
- p01 = pmin(init.mu2 - p11, init.mu2)
- p10 = pmin(init.mu1 - p11, init.mu1)
- p00 = 1.0 - p01 - p10 - p11
- mumat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
- mumat = mumat / rowSums(mumat)
- mumat[mumat < 1.0e-100] = 1.0e-100
-
- sum((if (is.numeric(extraargs$orig.w)) extraargs$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mumat,
- log = TRUE, dochecking = FALSE))
- }
- rho.grid = seq(-0.95, 0.95, len=31)
- try.this = getMaxMin(rho.grid, objfun=binom2.rho.Loglikfun,
- y=y, x=x, w=w, extraargs = list(
- orig.w = extra$orig.w,
- ycounts = ycounts,
- initmu1 = mu1.init,
- initmu2 = mu2.init,
- nvec = nvec
- ))
-
-
- rho.init = if (is.Numeric( .irho ))
- rep( .irho , len = n) else {
- try.this
- }
-
- etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
- theta2eta(mu2.init, .lmu12, earg = .emu12),
- theta2eta(rho.init, .lrho, earg = .erho))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
- .lrho = lrho, .erho = erho,
- .imethod = imethod,
- .imu1 = imu1, .imu2 = imu2, .irho = irho ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
- eta2theta(eta[,2], .lmu12, earg = .emu12))
- rho = eta2theta(eta[,3], .lrho, earg = .erho)
- p11 = pnorm2(eta[,1], eta[,2], rho)
- p01 = pmin(pmargin[,2] - p11, pmargin[,2])
- p10 = pmin(pmargin[,1] - p11, pmargin[,1])
- p00 = 1.0 - p01 - p10 - p11
- ansmat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
- ansmat / rowSums(ansmat)
- }, list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
- last = eval(substitute(expression({
- misc$link = c(mu1 = .lmu12, mu2 = .lmu12, rho = .lrho)
- misc$earg = list(mu1 = .emu12, mu2 = .emu12, rho = .erho)
- misc$nsimEIM = .nsimEIM
- misc$expected = TRUE
- }), list( .lmu12 = lmu12, .lrho = lrho, .nsimEIM = nsimEIM,
- .emu12 = emu12, .erho = erho ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ new("vglmff",
+ blurb = c("Bivariate probit model\n",
+ "Links: ",
+ namesof("mu1", lmu12, earg = emu12), ", ",
+ namesof("mu2", lmu12, earg = emu12), ", ",
+ namesof("rho", lrho, earg = erho)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(c(1, 1, 0, 0, 0, 1), 3, 2), x,
+ .exchangeable, constraints, intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ mustart.orig <- mustart
+ eval(process.binomial2.data.vgam)
- ycounts = if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
+ if (length(mustart.orig))
+ mustart <- mustart.orig # Retain it if inputted
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ predictors.names <- c(
+ namesof("mu1", .lmu12, earg = .emu12, short = TRUE),
+ namesof("mu2", .lmu12, earg = .emu12, short = TRUE),
+ namesof("rho", .lrho, earg = .erho, short = TRUE))
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ if (is.null( .nsimEIM)) {
+ save.weight <- control$save.weight <- FALSE
+ }
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- }
- }, list( .erho = erho ))),
- vfamily = c("binom2.rho", "binom2"),
- deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
- y * c(w) # Convert proportions to counts
-
- pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
- eta2theta(eta[,2], .lmu12, earg = .emu12))
- rhovec = eta2theta(eta[,3], .lrho, earg = .erho)
- p11 = pnorm2(eta[,1], eta[,2], rhovec)
- p01 = pmargin[,2]-p11
- p10 = pmargin[,1]-p11
- p00 = 1-p01-p10-p11
- ABmat = (eta[,1:2] - rhovec * eta[,2:1]) / sqrt(1.0 - rhovec^2)
- PhiA = pnorm(ABmat[,1])
- PhiB = pnorm(ABmat[,2])
- onemPhiA = pnorm(ABmat[,1], lower.tail = FALSE)
- onemPhiB = pnorm(ABmat[,2], lower.tail = FALSE)
-
- smallno = 1000 * .Machine$double.eps
- p00[p00 < smallno] = smallno
- p01[p01 < smallno] = smallno
- p10[p10 < smallno] = smallno
- p11[p11 < smallno] = smallno
-
- dprob00 = dnorm2(eta[,1], eta[,2], rhovec)
- dl.dprob1 = PhiB * (ycounts[,4]/p11 - ycounts[,2]/p01) +
- onemPhiB * (ycounts[,3]/p10 - ycounts[,1]/p00)
- dl.dprob2 = PhiA * (ycounts[,4]/p11 - ycounts[,3]/p10) +
- onemPhiA * (ycounts[,2]/p01 - ycounts[,1]/p00)
- dl.drho = (ycounts[,4]/p11 - ycounts[,3]/p10 -
- ycounts[,2]/p01 + ycounts[,1]/p00) * dprob00
- dprob1.deta = dtheta.deta(pmargin[,1], .lmu12, earg = .emu12)
- dprob2.deta = dtheta.deta(pmargin[,2], .lmu12, earg = .emu12)
- drho.deta = dtheta.deta(rhovec, .lrho, earg = .erho)
- dthetas.detas = cbind(dprob1.deta, dprob2.deta, drho.deta)
-
- (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- cbind(dl.dprob1, dl.dprob2, dl.drho) * dthetas.detas
- }), list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
- weight = eval(substitute(expression({
- if (is.null( .nsimEIM )) {
- d2l.dprob1prob1 = PhiB^2 * (1/p11 + 1/p01) +
- onemPhiB^2 * (1/p10 + 1/p00)
- d2l.dprob2prob2 = PhiA^2 * (1/p11 + 1/p10) +
- onemPhiA^2 * (1/p01 + 1/p00)
- d2l.dprob1prob2 = PhiA * ( PhiB/p11 - onemPhiB/p10) +
- onemPhiA * (onemPhiB/p00 - PhiB/p01)
- d2l.dprob1rho = (PhiB * (1/p11 + 1/p01) -
- onemPhiB * (1/p10 + 1/p00)) * dprob00
- d2l.dprob2rho = (PhiA * (1/p11 + 1/p10) -
- onemPhiA * (1/p01 + 1/p00)) * dprob00
- d2l.drho2 = (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
- wz = matrix(0, n, dimm(M)) # 6=dimm(M)
- wz[,iam(1,1,M)] = d2l.dprob1prob1 * dprob1.deta^2
- wz[,iam(2,2,M)] = d2l.dprob2prob2 * dprob2.deta^2
- wz[,iam(1,2,M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
- wz[,iam(1,3,M)] = d2l.dprob1rho * dprob1.deta * drho.deta
- wz[,iam(2,3,M)] = d2l.dprob2rho * dprob2.deta * drho.deta
- wz[,iam(3,3,M)] = d2l.drho2 * drho.deta^2
- } else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- for (ii in 1:( .nsimEIM )) {
- ysim = rbinom2.rho(n, mu1 = pmargin[,1], mu2 = pmargin[,2],
- twoCols = FALSE, rho = rhovec)
- dl.dprob1 = PhiB * (ysim[,4]/p11 - ysim[,2]/p01) +
- onemPhiB * (ysim[,3]/p10 - ysim[,1]/p00)
- dl.dprob2 = PhiA * (ysim[,4]/p11 - ysim[,3]/p10) +
- onemPhiA * (ysim[,2]/p01 - ysim[,1]/p00)
- dl.drho = (ysim[,4]/p11 - ysim[,3]/p10 -
- ysim[,2]/p01 + ysim[,1]/p00) * dprob00
-
- rm(ysim)
- temp3 = cbind(dl.dprob1, dl.dprob2, dl.drho)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index] * temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ ycounts <- if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
+ y * c(w) # Convert proportions to counts
+ if (max(abs(ycounts - round(ycounts))) > 1.0e-6)
+ warning("the response (as counts) does not appear to ",
+ "be integer-valued. Am rounding to integer values.")
+ ycounts <- round(ycounts) # Make sure it is an integer
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+
+ if (is.null(etastart)) {
+ if (length(mustart.orig)) {
+ mu1.init <- mustart.orig[, 3] + mustart.orig[, 4]
+ mu2.init <- mustart.orig[, 2] + mustart.orig[, 4]
+ } else if ( .imethod == 1) {
+ glm1.fit <- glm(cbind(ycounts[, 3] + ycounts[, 4],
+ ycounts[, 1] + ycounts[, 2]) ~ x - 1,
+ fam = binomial("probit"))
+ glm2.fit <- glm(cbind(ycounts[, 2] + ycounts[, 4],
+ ycounts[, 1] + ycounts[, 3]) ~ x - 1,
+ fam = binomial("probit"))
+ mu1.init <- fitted(glm1.fit)
+ mu2.init <- fitted(glm2.fit)
+ } else if ( .imethod == 2) {
+ mu1.init <- if (is.Numeric( .imu1 ))
+ rep( .imu1 , length = n) else
+ mu[, 3] + mu[, 4]
+ mu2.init <- if (is.Numeric( .imu2 ))
+ rep( .imu2 , length = n) else
+ mu[, 2] + mu[, 4]
+ } else {
+ stop("bad value for argument 'imethod'")
+ }
+
+
+
+ binom2.rho.Loglikfun =
+ function(rhoval, y, x, w, extraargs) {
+ init.mu1 = extraargs$initmu1
+ init.mu2 = extraargs$initmu2
+ ycounts = extraargs$ycounts
+ nvec = extraargs$nvec
+ eta1 = qnorm(init.mu1)
+ eta2 = qnorm(init.mu2)
+ p11 = pnorm2(eta1, eta2, rhoval)
+ p01 = pmin(init.mu2 - p11, init.mu2)
+ p10 = pmin(init.mu1 - p11, init.mu1)
+ p00 = 1.0 - p01 - p10 - p11
+ mumat = abs(cbind("00" = p00,
+ "01" = p01,
+ "10" = p10,
+ "11" = p11))
+ mumat = mumat / rowSums(mumat)
+ mumat[mumat < 1.0e-100] = 1.0e-100
+
+ sum((if (is.numeric(extraargs$orig.w)) extraargs$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mumat,
+ log = TRUE, dochecking = FALSE))
}
- c(w) * wz
- }), list( .nsimEIM = nsimEIM ))))
+ rho.grid = seq(-0.95, 0.95, len=31)
+ try.this = getMaxMin(rho.grid, objfun=binom2.rho.Loglikfun,
+ y=y, x=x, w=w, extraargs = list(
+ orig.w = extra$orig.w,
+ ycounts = ycounts,
+ initmu1 = mu1.init,
+ initmu2 = mu2.init,
+ nvec = nvec
+ ))
+
+
+ rho.init = if (is.Numeric( .irho ))
+ rep( .irho , len = n) else {
+ try.this
+ }
+
+ etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
+ theta2eta(mu2.init, .lmu12, earg = .emu12),
+ theta2eta(rho.init, .lrho, earg = .erho))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
+ .lrho = lrho, .erho = erho,
+ .imethod = imethod,
+ .imu1 = imu1, .imu2 = imu2, .irho = irho ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
+ eta2theta(eta[, 2], .lmu12, earg = .emu12))
+ rho = eta2theta(eta[, 3], .lrho, earg = .erho)
+ p11 = pnorm2(eta[, 1], eta[, 2], rho)
+ p01 = pmin(pmargin[, 2] - p11, pmargin[, 2])
+ p10 = pmin(pmargin[, 1] - p11, pmargin[, 1])
+ p00 = 1.0 - p01 - p10 - p11
+ ansmat = abs(cbind("00" = p00, "01" = p01, "10" = p10, "11" = p11))
+ ansmat / rowSums(ansmat)
+ }, list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
+ last = eval(substitute(expression({
+ misc$link = c(mu1 = .lmu12, mu2 = .lmu12, rho = .lrho)
+ misc$earg = list(mu1 = .emu12, mu2 = .emu12, rho = .erho)
+ misc$nsimEIM = .nsimEIM
+ misc$expected = TRUE
+ }), list( .lmu12 = lmu12, .lrho = lrho, .nsimEIM = nsimEIM,
+ .emu12 = emu12, .erho = erho ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+
+ ycounts = if (is.numeric(extra$orig.w))
+ y * c(w) / extra$orig.w else
+ y * c(w) # Convert proportions to counts
+
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
+
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ }
+ }, list( .erho = erho ))),
+ vfamily = c("binom2.rho", "binom2"),
+ deriv = eval(substitute(expression({
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts = if (is.numeric(extra$orig.w)) y * c(w) / extra$orig.w else
+ y * c(w) # Convert proportions to counts
+
+ pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
+ eta2theta(eta[, 2], .lmu12, earg = .emu12))
+ rhovec = eta2theta(eta[, 3], .lrho, earg = .erho)
+ p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
+ p01 = pmargin[, 2]-p11
+ p10 = pmargin[, 1]-p11
+ p00 = 1-p01-p10-p11
+
+ ABmat = (eta[, 1:2] - rhovec * eta[, 2:1]) / sqrt(1.0 - rhovec^2)
+ PhiA = pnorm(ABmat[, 1])
+ PhiB = pnorm(ABmat[, 2])
+ onemPhiA = pnorm(ABmat[, 1], lower.tail = FALSE)
+ onemPhiB = pnorm(ABmat[, 2], lower.tail = FALSE)
+
+ smallno = 1000 * .Machine$double.eps
+ p00[p00 < smallno] = smallno
+ p01[p01 < smallno] = smallno
+ p10[p10 < smallno] = smallno
+ p11[p11 < smallno] = smallno
+
+ dprob00 = dnorm2(eta[, 1], eta[, 2], rhovec)
+ dl.dprob1 = PhiB * (ycounts[, 4]/p11 - ycounts[, 2]/p01) +
+ onemPhiB * (ycounts[, 3]/p10 - ycounts[, 1]/p00)
+ dl.dprob2 = PhiA * (ycounts[, 4]/p11 - ycounts[, 3]/p10) +
+ onemPhiA * (ycounts[, 2]/p01 - ycounts[, 1]/p00)
+ dl.drho = (ycounts[, 4]/p11 - ycounts[, 3]/p10 -
+ ycounts[, 2]/p01 + ycounts[, 1]/p00) * dprob00
+ dprob1.deta = dtheta.deta(pmargin[, 1], .lmu12, earg = .emu12)
+ dprob2.deta = dtheta.deta(pmargin[, 2], .lmu12, earg = .emu12)
+ drho.deta = dtheta.deta(rhovec, .lrho, earg = .erho)
+ dthetas.detas = cbind(dprob1.deta, dprob2.deta, drho.deta)
+
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ cbind(dl.dprob1, dl.dprob2, dl.drho) * dthetas.detas
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .lrho = lrho, .erho = erho ))),
+ weight = eval(substitute(expression({
+ if (is.null( .nsimEIM )) {
+ d2l.dprob1prob1 = PhiB^2 * (1/p11 + 1/p01) +
+ onemPhiB^2 * (1/p10 + 1/p00)
+ d2l.dprob2prob2 = PhiA^2 * (1/p11 + 1/p10) +
+ onemPhiA^2 * (1/p01 + 1/p00)
+ d2l.dprob1prob2 = PhiA * ( PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ d2l.dprob1rho = (PhiB * (1/p11 + 1/p01) -
+ onemPhiB * (1/p10 + 1/p00)) * dprob00
+ d2l.dprob2rho = (PhiA * (1/p11 + 1/p10) -
+ onemPhiA * (1/p01 + 1/p00)) * dprob00
+ d2l.drho2 = (1/p11 + 1/p01 + 1/p10 + 1/p00) * dprob00^2
+ wz = matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[, iam(1, 1, M)] = d2l.dprob1prob1 * dprob1.deta^2
+ wz[, iam(2, 2, M)] = d2l.dprob2prob2 * dprob2.deta^2
+ wz[, iam(1, 2, M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ wz[, iam(1, 3, M)] = d2l.dprob1rho * dprob1.deta * drho.deta
+ wz[, iam(2, 3, M)] = d2l.dprob2rho * dprob2.deta * drho.deta
+ wz[, iam(3, 3, M)] = d2l.drho2 * drho.deta^2
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ for (ii in 1:( .nsimEIM )) {
+ ysim = rbinom2.rho(n, mu1 = pmargin[, 1], mu2 = pmargin[, 2],
+ twoCols = FALSE, rho = rhovec)
+ dl.dprob1 = PhiB * (ysim[, 4]/p11 - ysim[, 2]/p01) +
+ onemPhiB * (ysim[, 3]/p10 - ysim[, 1]/p00)
+ dl.dprob2 = PhiA * (ysim[, 4]/p11 - ysim[, 3]/p10) +
+ onemPhiA * (ysim[, 2]/p01 - ysim[, 1]/p00)
+ dl.drho = (ysim[, 4]/p11 - ysim[, 3]/p10 -
+ ysim[, 2]/p01 + ysim[, 1]/p00) * dprob00
+
+ rm(ysim)
+ temp3 = cbind(dl.dprob1, dl.dprob2, dl.drho)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[, ind1$row.index] * temp3[, ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ }
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
}
dnorm2 <- function(x, y, rho = 0, log = FALSE) {
- log.arg = log
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
if (log.arg) {
(-0.5*(x^2 + y^2 - 2*x*y*rho)/(1.0-rho^2)) - log(2) - log(pi) -
0.5 * log1p(-rho^2)
@@ -981,13 +1024,13 @@ dnorm2 <- function(x, y, rho = 0, log = FALSE) {
pnorm2 <- function(ah, ak, r) {
- ans <- ah
- size <- length(ah)
- singler <- ifelse(length(r) == 1, 1, 0)
- dotC(name = "pnorm2", ah=as.double(-ah), ak=as.double(-ak),
- r=as.double(r),
- size=as.integer(size), singler=as.integer(singler),
- ans=as.double(ans))$ans
+ ans <- ah
+ size <- length(ah)
+ singler <- ifelse(length(r) == 1, 1, 0)
+ dotC(name = "pnorm2", ah = as.double(-ah), ak = as.double(-ak),
+ r = as.double(r),
+ size = as.integer(size), singler = as.integer(singler),
+ ans = as.double(ans))$ans
}
@@ -999,84 +1042,90 @@ my.dbinom <- function(x,
prob = stop("no 'prob' argument"))
{
- exp( lgamma(size + 1) - lgamma(size - x +1) - lgamma(x + 1) +
- x * log(prob / (1 - prob)) + size * log1p(-prob) )
+ exp(lgamma(size + 1) - lgamma(size - x +1) - lgamma(x + 1) +
+ x * log(prob / (1 - prob)) + size * log1p(-prob))
}
- size.binomial <- function(prob = 0.5, link = "loge", earg = list())
+ size.binomial <- function(prob = 0.5, link = "loge")
{
- if (any(prob <= 0 || prob >= 1))
- stop("some values of prob out of range")
+ if (any(prob <= 0 || prob >= 1))
+ stop("some values of prob out of range")
- if (mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Binomial with n unknown, prob known (prob = ",prob,")\n",
- "Links: ",
- namesof("size", link, tag = TRUE),
- " (treated as real-valued)\n",
- "Variance: Var(Y) = size * prob * (1-prob);",
- " Var(size) is intractable"),
- initialize = eval(substitute(expression({
- predictors.names <- "size"
- extra$temp2 <- rep( .prob , length = n)
- if (is.null(etastart)) {
- nvec <- (y+0.1)/extra$temp2
- etastart <- theta2eta(nvec, .link )
- }
- }), list( .prob = prob, .link = link ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- nvec <- eta2theta(eta, .link)
- nvec * extra$temp2
- }, list( .link = link ))),
- last = eval(substitute(expression({
- misc$link <- c(size = .link)
- misc$prob <- extra$temp2
- }), list( .link = link ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- nvec <- mu / extra$temp2
- theta2eta(nvec, .link)
- }, list( .link = link ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, res = FALSE,eta, extra = NULL) {
- nvec <- mu / extra$temp2
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- sum(w * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
- y * log(.prob / (1- .prob)) + nvec * log1p(- .prob)))
- }
- }, list( .prob = prob ))),
- vfamily = c("size.binomial"),
- deriv = eval(substitute(expression({
- nvec <- mu/extra$temp2
- dldnvec = digamma(nvec+1) - digamma(nvec-y+1) + log1p(-extra$temp2)
- dnvecdeta <- dtheta.deta(nvec, .link)
- c(w) * cbind(dldnvec * dnvecdeta)
- }), list( .link = link ))),
- weight = eval(substitute(expression({
- d2ldnvec2 <- trigamma(nvec+1) - trigamma(nvec-y+1)
- # Note: if y == 0 then d2ldnvec2 is 0. Below is a quick fix.
- d2ldnvec2[y == 0] = -sqrt(.Machine$double.eps)
- wz = -c(w) * dnvecdeta^2 * d2ldnvec2
- wz
- }), list( .link = link ))))
+
+
+ new("vglmff",
+ blurb = c("Binomial with n unknown, prob known (prob = ", prob, ")\n",
+ "Links: ",
+ namesof("size", link, tag = TRUE),
+ " (treated as real-valued)\n",
+ "Variance: Var(Y) = size * prob * (1-prob);",
+ " Var(size) is intractable"),
+ initialize = eval(substitute(expression({
+ predictors.names <- "size"
+ extra$temp2 <- rep( .prob , length = n)
+
+ if (is.null(etastart)) {
+ nvec <- (y+0.1)/extra$temp2
+ etastart <- theta2eta(nvec, .link )
+ }
+ }), list( .prob = prob, .link = link ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ nvec <- eta2theta(eta, .link)
+ nvec * extra$temp2
+ }, list( .link = link ))),
+ last = eval(substitute(expression({
+ misc$link <- c(size = .link)
+ misc$prob <- extra$temp2
+ }), list( .link = link ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ nvec <- mu / extra$temp2
+ theta2eta(nvec, .link)
+ }, list( .link = link ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, res = FALSE,eta, extra = NULL) {
+ nvec <- mu / extra$temp2
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+
+ sum(c(w) * (lgamma(nvec+1) - lgamma(y+1) - lgamma(nvec-y+1) +
+ y * log( .prob / (1- .prob )) +
+ nvec * log1p(- .prob )))
+ }
+ }, list( .prob = prob ))),
+ vfamily = c("size.binomial"),
+ deriv = eval(substitute(expression({
+ nvec <- mu/extra$temp2
+ dldnvec = digamma(nvec+1) - digamma(nvec-y+1) + log1p(-extra$temp2)
+ dnvecdeta <- dtheta.deta(nvec, .link)
+ c(w) * cbind(dldnvec * dnvecdeta)
+ }), list( .link = link ))),
+ weight = eval(substitute(expression({
+ d2ldnvec2 <- trigamma(nvec+1) - trigamma(nvec-y+1)
+ d2ldnvec2[y == 0] = -sqrt(.Machine$double.eps)
+ wz = -c(w) * dnvecdeta^2 * d2ldnvec2
+ wz
+ }), list( .link = link ))))
}
- dbetabinom.ab = function(x, size, shape1, shape2, log = FALSE,
- .dontuse.prob = NULL) {
+ dbetabinom.ab <- function(x, size, shape1, shape2, log = FALSE,
+ .dontuse.prob = NULL) {
- log.arg = log
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
if (!is.Numeric(x))
stop("bad input for argument 'x'")
if (!is.Numeric(size, integer.valued = TRUE))
@@ -1134,7 +1183,7 @@ my.dbinom <- function(x,
- pbetabinom.ab = function(q, size, shape1, shape2, log.p = FALSE) {
+ pbetabinom.ab <- function(q, size, shape1, shape2, log.p = FALSE) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
@@ -1158,8 +1207,9 @@ my.dbinom <- function(x,
max(abs(shape2 - shape2[1])) < 1.0e-08) {
qstar = floor(q)
temp = if (max(qstar) >= 0) {
- dbetabinom.ab(0:max(qstar), size = size[1], shape1 = shape1[1],
- shape2 = shape2[1])
+ dbetabinom.ab(0:max(qstar), size = size[1],
+ shape1 = shape1[1],
+ shape2 = shape2[1])
} else {
0 * qstar
}
@@ -1173,7 +1223,8 @@ my.dbinom <- function(x,
qstar = floor(q[ii])
ans[ii] = if (qstar >= 0) {
sum(dbetabinom.ab(x = 0:qstar, size = size[ii],
- shape1 = shape1[ii], shape2 = shape2[ii]))
+ shape1 = shape1[ii],
+ shape2 = shape2[ii]))
} else 0
}
}
@@ -1182,7 +1233,7 @@ my.dbinom <- function(x,
- rbetabinom.ab = function(n, size, shape1, shape2,
+ rbetabinom.ab <- function(n, size, shape1, shape2,
.dontuse.prob = NULL) {
if (!is.Numeric(size, integer.valued = TRUE))
@@ -1233,20 +1284,20 @@ my.dbinom <- function(x,
- dbetabinom = function(x, size, prob, rho = 0, log = FALSE) {
+ dbetabinom <- function(x, size, prob, rho = 0, log = FALSE) {
dbetabinom.ab(x = x, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho, log = log,
.dontuse.prob = prob)
}
- pbetabinom = function(q, size, prob, rho, log.p = FALSE) {
+ pbetabinom <- function(q, size, prob, rho, log.p = FALSE) {
pbetabinom.ab(q = q, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho, log.p = log.p)
}
- rbetabinom = function(n, size, prob, rho = 0) {
+ rbetabinom <- function(n, size, prob, rho = 0) {
rbetabinom.ab(n = n, size = size, shape1 = prob*(1-rho)/rho,
shape2 = (1-prob)*(1-rho)/rho,
.dontuse.prob = prob)
@@ -1254,32 +1305,32 @@ my.dbinom <- function(x,
- expected.betabin.ab = function(nvec, shape1, shape2, first) {
+ expected.betabin.ab <- function(nvec, shape1, shape2, first) {
- NN = length(nvec)
- ans = rep(0.0, len = NN)
- if (first) {
- for (ii in 1:NN) {
- temp639 = lbeta(shape1[ii], shape2[ii])
- yy = 0:nvec[ii]
- ans[ii] = ans[ii] + sum(trigamma(shape1[ii] + yy) *
- exp(lchoose(nvec[ii], yy) +
- lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) -
- temp639))
- }
- } else {
- for (ii in 1:NN) {
- temp639 = lbeta(shape1[ii], shape2[ii])
- yy = 0:nvec[ii]
- ans[ii] = ans[ii] + sum(trigamma(nvec[ii]+shape2[ii] - yy) *
- exp(lchoose(nvec[ii], yy) +
- lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) -
- temp639))
- }
+ NN <- length(nvec)
+ ans <- rep(0.0, len = NN)
+ if (first) {
+ for (ii in 1:NN) {
+ temp639 <- lbeta(shape1[ii], shape2[ii])
+ yy <- 0:nvec[ii]
+ ans[ii] <- ans[ii] + sum(trigamma(shape1[ii] + yy) *
+ exp(lchoose(nvec[ii], yy) +
+ lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) -
+ temp639))
+ }
+ } else {
+ for (ii in 1:NN) {
+ temp639 <- lbeta(shape1[ii], shape2[ii])
+ yy <- 0:nvec[ii]
+ ans[ii] <- ans[ii] + sum(trigamma(nvec[ii]+shape2[ii] - yy) *
+ exp(lchoose(nvec[ii], yy) +
+ lbeta(shape1[ii]+yy, shape2[ii]+nvec[ii]-yy) -
+ temp639))
}
- ans
+ }
+ ans
}
@@ -1291,12 +1342,19 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
- betabinomial.ab = function(lshape12 = "loge", earg = list(),
- i1 = 1, i2 = NULL, imethod = 1,
- shrinkage.init = 0.95, nsimEIM = NULL,
- zero = NULL) {
- if (mode(lshape12) != "character" && mode(lshape12) != "name")
- lshape12 = as.character(substitute(lshape12))
+ betabinomial.ab <- function(lshape12 = "loge",
+ i1 = 1, i2 = NULL, imethod = 1,
+ shrinkage.init = 0.95, nsimEIM = NULL,
+ zero = NULL) {
+
+
+ lshape12 <- as.list(substitute(lshape12))
+ earg <- link2list(lshape12)
+ lshape12 <- attr(earg, "function.name")
+
+
+
+
if (!is.Numeric(i1, positive = TRUE))
stop("bad input for argument 'i1'")
if (!is.Numeric(imethod, allowable.length = 1,
@@ -1306,7 +1364,6 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
if (length(i2) && !is.Numeric(i2, positive = TRUE))
stop("bad input for argument 'i2'")
- if (!is.list(earg)) earg = list()
if (!is.null(nsimEIM)) {
if (!is.Numeric(nsimEIM, allowable.length = 1,
@@ -1357,7 +1414,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
} else if ( .imethod == 1) {
shape1 * (1 / weighted.mean(y, w) - 1)
} else if ( .imethod == 2) {
- temp777 = .sinit * weighted.mean(y, w) + (1- .sinit) * y
+ temp777 = .sinit * weighted.mean(y, w) +
+ (1 - .sinit) * y
shape1 * (1 / temp777 - 1)
} else {
shape1 * (1 / weighted.mean(mustart.use, w) - 1)
@@ -1376,15 +1434,15 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
.nsimEIM = nsimEIM,
.imethod = imethod, .sinit = shrinkage.init ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
shape1 / (shape1 + shape2)
}, list( .lshape12 = lshape12, .earg = earg ))),
last = eval(substitute(expression({
misc$link = c("shape1" = .lshape12, "shape2" = .lshape12)
misc$earg <- list(shape1 = .earg, shape2 = .earg)
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
misc$rho = 1 / (shape1 + shape2 + 1)
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
@@ -1401,8 +1459,8 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
warning("converting 'ycounts' to integer in @loglikelihood")
ycounts = round(ycounts)
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
+ shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
if (residuals)
@@ -1414,66 +1472,68 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
}, list( .lshape12 = lshape12, .earg = earg ))),
vfamily = c("betabinomial.ab"),
deriv = eval(substitute(expression({
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- shape1 = eta2theta(eta[,1], .lshape12, earg = .earg)
- shape2 = eta2theta(eta[,2], .lshape12, earg = .earg)
- dshape1.deta = dtheta.deta(shape1, .lshape12, earg = .earg)
- dshape2.deta = dtheta.deta(shape2, .lshape12, earg = .earg)
- dl.dshape1 = digamma(shape1+ycounts) -
- digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1 + shape2)
- dl.dshape2 = digamma(nvec + shape2 - ycounts) -
- digamma(shape1 + shape2 + nvec) -
- digamma(shape2) + digamma(shape1 + shape2)
- (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- cbind(dl.dshape1 * dshape1.deta,
- dl.dshape2 * dshape2.deta)
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ shape1 = eta2theta(eta[, 1], .lshape12, earg = .earg)
+ shape2 = eta2theta(eta[, 2], .lshape12, earg = .earg)
+ dshape1.deta = dtheta.deta(shape1, .lshape12, earg = .earg)
+ dshape2.deta = dtheta.deta(shape2, .lshape12, earg = .earg)
+ dl.dshape1 = digamma(shape1+ycounts) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1 + shape2)
+ dl.dshape2 = digamma(nvec + shape2 - ycounts) -
+ digamma(shape1 + shape2 + nvec) -
+ digamma(shape2) + digamma(shape1 + shape2)
+ (if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ cbind(dl.dshape1 * dshape1.deta,
+ dl.dshape2 * dshape2.deta)
}), list( .lshape12 = lshape12, .earg = earg ))),
weight = eval(substitute(expression({
- if (is.null( .nsimEIM)) {
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
- wz[,iam(1,1,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
- TRUE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape1) + trigamma(shape1+shape2)) *
- dshape1.deta^2
- wz[,iam(2,2,M)] = -(expected.betabin.ab(nvec,shape1,shape2,
- FALSE) -
- trigamma(shape1+shape2+nvec) -
- trigamma(shape2) + trigamma(shape1+shape2)) *
- dshape2.deta^2
- wz[,iam(2,1,M)] = -(trigamma(shape1+shape2) -
- trigamma(shape1+shape2+nvec)) *
- dshape1.deta * dshape2.deta
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ if (is.null( .nsimEIM)) {
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(2)
+ wz[, iam(1, 1, M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ TRUE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape1) + trigamma(shape1+shape2)) *
+ dshape1.deta^2
+ wz[, iam(2, 2, M)] = -(expected.betabin.ab(nvec,shape1,shape2,
+ FALSE) -
+ trigamma(shape1+shape2+nvec) -
+ trigamma(shape2) + trigamma(shape1+shape2)) *
+ dshape2.deta^2
+ wz[, iam(2, 1, M)] = -(trigamma(shape1+shape2) -
+ trigamma(shape1+shape2+nvec)) *
+ dshape1.deta * dshape2.deta
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
} else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dshape1.deta, dshape2.deta)
-
- for (ii in 1:( .nsimEIM )) {
- ysim = rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
- shape2 = shape2)
- dl.dshape1 = digamma(shape1+ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape1) + digamma(shape1+shape2)
- dl.dshape2 = digamma(nvec+shape2-ysim) -
- digamma(shape1+shape2+nvec) -
- digamma(shape2) + digamma(shape1+shape2)
- rm(ysim)
- temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ dthetas.detas = cbind(dshape1.deta, dshape2.deta)
+
+ for (ii in 1:( .nsimEIM )) {
+ ysim = rbetabinom.ab(n = n, size = nvec, shape1 = shape1,
+ shape2 = shape2)
+ checkargs = .checkargs
+ dl.dshape1 = digamma(shape1+ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape1) + digamma(shape1+shape2)
+ dl.dshape2 = digamma(nvec+shape2-ysim) -
+ digamma(shape1+shape2+nvec) -
+ digamma(shape2) + digamma(shape1+shape2)
+ rm(ysim)
+ temp3 = cbind(dl.dshape1, dl.dshape2) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[, ind1$row.index]*
+ temp3[, ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
+ wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz * (if (is.numeric(extra$orig.w)) extra$orig.w else 1)
}
}), list( .lshape12 = lshape12, .earg = earg,
.nsimEIM = nsimEIM ))))
@@ -1481,17 +1541,20 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
- betageometric = function(lprob = "logit", lshape = "loge",
- eprob = list(), eshape = list(),
- iprob = NULL, ishape = 0.1,
- moreSummation = c(2, 100),
- tolerance = 1.0e-10,
- zero = NULL)
+ betageometric <- function(lprob = "logit", lshape = "loge",
+ iprob = NULL, ishape = 0.1,
+ moreSummation = c(2, 100),
+ tolerance = 1.0e-10,
+ zero = NULL)
{
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
if (!is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
@@ -1502,141 +1565,152 @@ betabinomial.ab.control <- function(save.weight = TRUE, ...) {
1.0 - tolerance >= 1.0)
stop("bad input for argument 'tolerance'")
- if (!is.list(eprob)) eprob = list()
- if (!is.list(eshape)) eshape = list()
- new("vglmff",
- blurb = c("Beta-geometric distribution\n",
- "Links: ",
- namesof("prob", lprob, earg = eprob), ", ",
- namesof("shape", lshape, earg = eshape)),
- constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- eval(geometric()@initialize)
- predictors.names =
- c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, short = FALSE))
- if (length( .iprob))
- prob.init = rep( .iprob , len = n)
- if (!length(etastart) || ncol(cbind(etastart)) != 2) {
- shape.init = rep( .ishape , len = n)
- etastart = cbind(theta2eta(prob.init, .lprob, earg = .eprob),
- theta2eta(shape.init, .lshape, earg = .eshape))
- }
- }), list( .iprob=iprob, .ishape=ishape, .lprob = lprob,
- .eprob = eprob, .eshape = eshape,
- .lshape = lshape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta[,1], .lprob, earg = .eprob)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
- mymu = (1-prob) / (prob - shape)
- ifelse(mymu >= 0, mymu, NA)
- }, list( .lprob = lprob, .lshape = lshape,
- .eprob = eprob, .eshape = eshape ))),
- last = eval(substitute(expression({
- misc$link = c("prob" = .lprob, "shape" = .lshape)
- misc$earg <- list(prob = .eprob, shape = .eshape)
- if (intercept.only) {
- misc$shape1 = shape1[1] # These quantities computed in @deriv
- misc$shape2 = shape2[1]
- }
- misc$expected = TRUE
- misc$tolerance = .tolerance
- misc$zero = .zero
- misc$moreSummation = .moreSummation
- }), list( .lprob = lprob, .lshape = lshape, .tolerance = tolerance,
- .eprob = eprob, .eshape = eshape,
- .moreSummation = moreSummation, .zero = zero ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- prob = eta2theta(eta[,1], .lprob, earg = .eprob)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
- ans = log(prob)
- maxy = max(y)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- for (ii in 1:maxy) {
- index = ii <= y
- ans[index] = ans[index] + log1p(-prob[index]+(ii-1) *
- shape[index]) - log1p((ii-1)*shape[index])
- }
- ans = ans - log1p((y+1-1)*shape)
+ new("vglmff",
+ blurb = c("Beta-geometric distribution\n",
+ "Links: ",
+ namesof("prob", lprob, earg = eprob), ", ",
+ namesof("shape", lshape, earg = eshape)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ eval(geometric()@initialize)
+ predictors.names =
+ c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
+ namesof("shape", .lshape, earg = .eshape, short = FALSE))
+
+ if (length( .iprob))
+ prob.init = rep( .iprob , len = n)
+
+ if (!length(etastart) ||
+ ncol(cbind(etastart)) != 2) {
+ shape.init = rep( .ishape , len = n)
+ etastart =
+ cbind(theta2eta(prob.init, .lprob, earg = .eprob),
+ theta2eta(shape.init, .lshape, earg = .eshape))
+ }
+ }), list( .iprob = iprob, .ishape = ishape, .lprob = lprob,
+ .eprob = eprob, .eshape = eshape,
+ .lshape = lshape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ mymu = (1-prob) / (prob - shape)
+ ifelse(mymu >= 0, mymu, NA)
+ }, list( .lprob = lprob, .lshape = lshape,
+ .eprob = eprob, .eshape = eshape ))),
+ last = eval(substitute(expression({
+ misc$link = c("prob" = .lprob, "shape" = .lshape)
+ misc$earg <- list(prob = .eprob, shape = .eshape)
+ if (intercept.only) {
+ misc$shape1 = shape1[1] # These quantities computed in @deriv
+ misc$shape2 = shape2[1]
+ }
+ misc$expected = TRUE
+ misc$tolerance = .tolerance
+ misc$zero = .zero
+ misc$moreSummation = .moreSummation
+ }), list( .lprob = lprob, .lshape = lshape, .tolerance = tolerance,
+ .eprob = eprob, .eshape = eshape,
+ .moreSummation = moreSummation, .zero = zero ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ ans = log(prob)
+ maxy = max(y)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ for (ii in 1:maxy) {
+ index = ii <= y
+ ans[index] = ans[index] + log1p(-prob[index]+(ii-1) *
+ shape[index]) - log1p((ii-1)*shape[index])
+ }
+ ans = ans - log1p((y+1-1)*shape)
- sum(w * ans)
- }
- }, list( .lprob = lprob, .lshape = lshape,
- .eprob = eprob, .eshape = eshape ))),
- vfamily = c("betageometric"),
- deriv = eval(substitute(expression({
- prob = eta2theta(eta[,1], .lprob, earg = .eprob)
- shape = eta2theta(eta[,2], .lshape, earg = .eshape)
- shape1 = prob / shape; shape2 = (1-prob) / shape;
- dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
- dl.dprob = 1 / prob
- dl.dshape = 0 * y
- maxy = max(y)
- for (ii in 1:maxy) {
- index = ii <= y
- dl.dprob[index] = dl.dprob[index] -
- 1/(1-prob[index]+(ii-1)*shape[index])
- dl.dshape[index] = dl.dshape[index] +
- (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
- (ii-1)/(1+(ii-1)*shape[index])
- }
- dl.dshape = dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
- c(w) * cbind(dl.dprob * dprob.deta,
- dl.dshape * dshape.deta)
- }), list( .lprob = lprob, .lshape = lshape,
- .eprob = eprob, .eshape = eshape ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, dimm(M)) #3=dimm(2)
- wz[,iam(1,1,M)] = 1 / prob^2
- moresum = .moreSummation
- maxsummation = round(maxy * moresum[1] + moresum[2])
- for (ii in 3:maxsummation) {
- temp7 = 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
- denom1 = (1-prob+(ii-2)*shape)^2
- denom2 = (1+(ii-2)*shape)^2
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 / denom1
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] - (ii-2) * temp7 / denom1
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + (ii-2)^2 * temp7 / denom1 -
- (ii-1)^2 * temp7 / denom2
- if (max(temp7) < .tolerance ) break;
- }
- ii = 2
- temp7 = 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
- denom1 = (1-prob+(ii-2)*shape)^2
- denom2 = (1+(ii-2)*shape)^2
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 / denom1
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - (ii-1)^2 * temp7 / denom2
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dprob.deta^2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dshape.deta^2
- wz[,iam(2,1,M)] = wz[,iam(2,1,M)] * dprob.deta * dshape.deta
- c(w) * wz
- }), list( .lprob = lprob, .lshape = lshape,
- .eprob = eprob, .eshape = eshape,
- .moreSummation = moreSummation,
- .tolerance = tolerance ))))
+
+
+
+ sum(w * ans)
+ }
+ }, list( .lprob = lprob, .lshape = lshape,
+ .eprob = eprob, .eshape = eshape ))),
+ vfamily = c("betageometric"),
+ deriv = eval(substitute(expression({
+ prob = eta2theta(eta[, 1], .lprob, earg = .eprob)
+ shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ shape1 = prob / shape; shape2 = (1-prob) / shape;
+ dprob.deta = dtheta.deta(prob, .lprob, earg = .eprob)
+ dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
+ dl.dprob = 1 / prob
+ dl.dshape = 0 * y
+ maxy = max(y)
+ for (ii in 1:maxy) {
+ index = ii <= y
+ dl.dprob[index] = dl.dprob[index] -
+ 1/(1-prob[index]+(ii-1)*shape[index])
+ dl.dshape[index] = dl.dshape[index] +
+ (ii-1)/(1-prob[index]+(ii-1)*shape[index]) -
+ (ii-1)/(1+(ii-1)*shape[index])
+ }
+ dl.dshape = dl.dshape - (y+1 -1)/(1+(y+1 -1)*shape)
+ c(w) * cbind(dl.dprob * dprob.deta,
+ dl.dshape * dshape.deta)
+ }), list( .lprob = lprob, .lshape = lshape,
+ .eprob = eprob, .eshape = eshape ))),
+ weight = eval(substitute(expression({
+ wz = matrix(0, n, dimm(M)) #3=dimm(2)
+ wz[, iam(1, 1, M)] = 1 / prob^2
+ moresum = .moreSummation
+ maxsummation = round(maxy * moresum[1] + moresum[2])
+ for (ii in 3:maxsummation) {
+ temp7 = 1 - pbetageom(q = ii-1-1, shape1 = shape1,
+ shape2 = shape2)
+ denom1 = (1-prob+(ii-2)*shape)^2
+ denom2 = (1+(ii-2)*shape)^2
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + temp7 / denom1
+ wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] - (ii-2) * temp7 / denom1
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] + (ii-2)^2 * temp7 / denom1 -
+ (ii-1)^2 * temp7 / denom2
+ if (max(temp7) < .tolerance ) break;
+ }
+ ii = 2
+ temp7 = 1 - pbetageom(q=ii-1-1, shape1 = shape1, shape2 = shape2)
+ denom1 = (1-prob+(ii-2)*shape)^2
+ denom2 = (1+(ii-2)*shape)^2
+
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] + temp7 / denom1
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - (ii-1)^2 * temp7 / denom2
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dprob.deta^2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dshape.deta^2
+ wz[, iam(2, 1, M)] = wz[, iam(2, 1, M)] * dprob.deta * dshape.deta
+ c(w) * wz
+ }), list( .lprob = lprob, .lshape = lshape,
+ .eprob = eprob, .eshape = eshape,
+ .moreSummation = moreSummation,
+ .tolerance = tolerance ))))
}
-seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
- eprob1 = list(), eprob2 = list(),
- iprob1 = NULL, iprob2 = NULL,
- zero = NULL)
+seq2binomial <- function(lprob1 = "logit", lprob2 = "logit",
+ iprob1 = NULL, iprob2 = NULL,
+ zero = NULL)
{
- if (mode(lprob1) != "character" && mode(lprob1) != "name")
- lprob1 = as.character(substitute(lprob1))
- if (mode(lprob2) != "character" && mode(lprob2) != "name")
- lprob2 = as.character(substitute(lprob2))
+ lprob1 <- as.list(substitute(lprob1))
+ eprob1 <- link2list(lprob1)
+ lprob1 <- attr(eprob1, "function.name")
+
+ lprob2 <- as.list(substitute(lprob2))
+ eprob2 <- link2list(lprob2)
+ lprob2 <- attr(eprob2, "function.name")
+
+
if (length(iprob1) &&
(!is.Numeric(iprob1, positive = TRUE) ||
@@ -1647,8 +1721,6 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
max(iprob2) >= 1))
stop("bad input for argument 'iprob2'")
- if (!is.list(eprob1)) eprob1 = list()
- if (!is.list(eprob2)) eprob2 = list()
new("vglmff",
@@ -1664,31 +1736,34 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
stop("the 'weights' argument must be a vector")
if (any(abs(w - round(w)) > 0.000001))
stop("the 'weights' argument does not seem to be integer-valued")
+
if (ncol(y <- cbind(y)) != 2)
stop("the response must be a 2-column matrix")
if (any(y < 0 | y > 1))
stop("the response must have values between 0 and 1")
w = round(w)
- rvector = w * y[,1]
+ rvector = w * y[, 1]
if (any(abs(rvector - round(rvector)) > 1.0e-8))
warning("number of successes in column one ",
"should be integer-valued")
- svector = rvector * y[,2]
+ svector = rvector * y[, 2]
if (any(abs(svector - round(svector)) > 1.0e-8))
warning("number of successes in",
" column two should be integer-valued")
predictors.names =
- c(namesof("prob1", .lprob1,earg= .eprob1, tag = FALSE),
- namesof("prob2", .lprob2,earg= .eprob2, tag = FALSE))
+ c(namesof("prob1", .lprob1,earg = .eprob1, tag = FALSE),
+ namesof("prob2", .lprob2,earg = .eprob2, tag = FALSE))
+
prob1.init = if (is.Numeric( .iprob1))
rep( .iprob1 , len = n) else
- rep(weighted.mean(y[,1], w = w), len = n)
+ rep(weighted.mean(y[, 1], w = w), len = n)
prob2.init = if (is.Numeric( .iprob2 ))
rep( .iprob2 , length = n) else
- rep(weighted.mean(y[,2], w = w*y[,1]),
+ rep(weighted.mean(y[, 2], w = w*y[, 1]),
length = n)
+
if (!length(etastart)) {
etastart =
cbind(theta2eta(prob1.init, .lprob1, earg = .eprob1),
@@ -1698,31 +1773,32 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
.lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob1 = eta2theta(eta[,1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[,2], .lprob2, earg = .eprob2)
+ prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
cbind(prob1, prob2)
}, list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))),
last = eval(substitute(expression({
- misc$link = c("prob1" = .lprob1, "prob2" = .lprob2)
- misc$earg <- list(prob1 = .eprob1, prob2 = .eprob2)
- misc$expected = TRUE
- misc$zero = .zero
+ misc$link = c("prob1" = .lprob1, "prob2" = .lprob2)
+ misc$earg <- list(prob1 = .eprob1, prob2 = .eprob2)
+
+ misc$expected = TRUE
+ misc$zero = .zero
}), list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2,
.zero = zero ))),
loglikelihood = eval(substitute(
function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- prob1 = eta2theta(eta[,1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[,2], .lprob2, earg = .eprob2)
+ prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
smallno = 100 * .Machine$double.eps
prob1 = pmax(prob1, smallno)
prob1 = pmin(prob1, 1-smallno)
prob2 = pmax(prob2, smallno)
prob2 = pmin(prob2, 1-smallno)
mvector = w
- rvector = w * y[,1]
- svector = rvector * y[,2]
+ rvector = w * y[, 1]
+ svector = rvector * y[, 2]
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(rvector * log(prob1) + (mvector-rvector) * log1p(-prob1) +
@@ -1732,8 +1808,8 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
.eprob1 = eprob1, .eprob2 = eprob2 ))),
vfamily = c("seq2binomial"),
deriv = eval(substitute(expression({
- prob1 = eta2theta(eta[,1], .lprob1, earg = .eprob1)
- prob2 = eta2theta(eta[,2], .lprob2, earg = .eprob2)
+ prob1 = eta2theta(eta[, 1], .lprob1, earg = .eprob1)
+ prob2 = eta2theta(eta[, 2], .lprob2, earg = .eprob2)
smallno = 100 * .Machine$double.eps
prob1 = pmax(prob1, smallno)
prob1 = pmin(prob1, 1-smallno)
@@ -1743,8 +1819,8 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
dprob2.deta = dtheta.deta(prob2, .lprob2, earg = .eprob2)
mvector = w
- rvector = w * y[,1]
- svector = rvector * y[,2]
+ rvector = w * y[, 1]
+ svector = rvector * y[, 2]
dl.dprob1 = rvector / prob1 - (mvector-rvector) / (1-prob1)
dl.dprob2 = svector / prob2 - (rvector-svector) / (1-prob2)
@@ -1754,8 +1830,8 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
.eprob1 = eprob1, .eprob2 = eprob2 ))),
weight = eval(substitute(expression({
wz = matrix(0, n, M)
- wz[,iam(1,1,M)] = (dprob1.deta^2) / (prob1 * (1-prob1))
- wz[,iam(2,2,M)] = (dprob2.deta^2) * prob1 / (prob2 * (1-prob2))
+ wz[, iam(1, 1, M)] = (dprob1.deta^2) / (prob1 * (1-prob1))
+ wz[, iam(2, 2, M)] = (dprob2.deta^2) * prob1 / (prob2 * (1-prob2))
c(w) * wz
}), list( .lprob1 = lprob1, .lprob2 = lprob2,
.eprob1 = eprob1, .eprob2 = eprob2 ))))
@@ -1763,19 +1839,28 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
- zipebcom = function(lmu12 = "cloglog",
- lphi12 = "logit", loratio = "loge",
- emu12 = list(), ephi12 = list(),
- eoratio = list(),
- imu12 = NULL, iphi12 = NULL,
- ioratio = NULL,
- zero = 2:3, tol = 0.001, addRidge = 0.001)
+ zipebcom <- function(lmu12 = "cloglog",
+ lphi12 = "logit",
+ loratio = "loge",
+ imu12 = NULL, iphi12 = NULL,
+ ioratio = NULL,
+ zero = 2:3, tol = 0.001, addRidge = 0.001)
{
- if (mode(lphi12) != "character" && mode(lphi12) != "name")
- lphi12 = as.character(substitute(lphi12))
- if (mode(loratio) != "character" && mode(loratio) != "name")
- loratio = as.character(substitute(loratio))
+
+ lmu12 <- as.list(substitute(lmu12))
+ emu12 <- link2list(lmu12)
+ lmu12 <- attr(emu12, "function.name")
+
+ lphi12 <- as.list(substitute(lphi12))
+ ephi12 <- link2list(lphi12)
+ lphi12 <- attr(ephi12, "function.name")
+
+ loratio <- as.list(substitute(loratio))
+ eoratio <- link2list(loratio)
+ loratio <- attr(eoratio, "function.name")
+
+
if (!is.Numeric(tol, positive = TRUE, allowable.length = 1) ||
tol > 0.1)
@@ -1784,154 +1869,156 @@ seq2binomial = function(lprob1 = "logit", lprob2 = "logit",
addRidge > 0.5)
stop("bad input for argument 'addRidge'")
- if (!is.list(emu12)) emu12 = list()
- if (!is.list(ephi12)) ephi12 = list()
- if (!is.list(eoratio)) eoratio = list()
-
if (lmu12 != "cloglog")
warning("argument 'lmu12' should be 'cloglog'")
- new("vglmff",
- blurb = c("Exchangeable bivariate ", lmu12,
- " odds-ratio model based on\n",
- "a zero-inflated Poisson distribution\n\n",
- "Links: ",
- namesof("mu12", lmu12, earg = emu12), ", ",
- namesof("phi12", lphi12, earg = ephi12), ", ",
- namesof("oratio", loratio, earg = eoratio)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ new("vglmff",
+ blurb = c("Exchangeable bivariate ", lmu12,
+ " odds-ratio model based on\n",
+ "a zero-inflated Poisson distribution\n\n",
+ "Links: ",
+ namesof("mu12", lmu12, earg = emu12), ", ",
+ namesof("phi12", lphi12, earg = ephi12), ", ",
+ namesof("oratio", loratio, earg = eoratio)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- eval(process.binomial2.data.vgam)
- predictors.names = c(
- namesof("mu12", .lmu12, earg = .emu12, short = TRUE),
- namesof("phi12", .lphi12, earg = .ephi12, short = TRUE),
- namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
-
- propY1.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
- propY2.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
- if (length( .iphi12) && any( .iphi12 > propY1.eq.0))
- warning("iphi12 must be less than the sample proportion of Y1==0")
- if (length( .iphi12) && any( .iphi12 > propY2.eq.0))
- warning("iphi12 must be less than the sample proportion of Y2==0")
-
- if (!length(etastart)) {
- pstar.init = ((mu[,3]+mu[,4]) + (mu[,2]+mu[,4])) / 2
- phi.init = if (length(.iphi12)) rep(.iphi12, len = n) else
- min(propY1.eq.0 * 0.95, propY2.eq.0 * 0.95, pstar.init/1.5)
- oratio.init = if (length( .ioratio)) rep( .ioratio, len = n) else
- mu[,4]*mu[,1]/(mu[,2]*mu[,3])
- mu12.init = if (length(.imu12)) rep(.imu12, len = n) else
- pstar.init / (1-phi.init)
- etastart = cbind(
- theta2eta(mu12.init, .lmu12, earg = .emu12),
- theta2eta(phi.init, .lphi12, earg = .ephi12),
- theta2eta(oratio.init, .loratio, earg = .eoratio))
- }
- }), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
- .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio,
- .imu12 = imu12, .iphi12 = iphi12, .ioratio = ioratio ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- A1vec = eta2theta(eta[,1], .lmu12, earg = .emu12)
- phivec = eta2theta(eta[,2], .lphi12, earg = .ephi12)
- pmargin = matrix((1 - phivec) * A1vec, nrow(eta), 2)
- oratio = eta2theta(eta[,3], .loratio, earg = .eoratio)
- a.temp = 1 + (pmargin[,1]+pmargin[,2])*(oratio-1)
- b.temp = -4 * oratio * (oratio-1) * pmargin[,1] * pmargin[,2]
- temp = sqrt(a.temp^2 + b.temp)
- pj4 = ifelse(abs(oratio-1) < .tol, pmargin[,1]*pmargin[,2],
- (a.temp-temp)/(2*(oratio-1)))
- pj2 = pmargin[,2] - pj4
- pj3 = pmargin[,1] - pj4
- cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
- }, list( .tol = tol,
- .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
- .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
- last = eval(substitute(expression({
- misc$link = c("mu12"= .lmu12, "phi12" = .lphi12, "oratio"= .loratio)
- misc$earg = list("mu12"= .emu12, "phi12"= .ephi12, "oratio"= .eoratio)
- misc$tol = .tol
- misc$expected = TRUE
- misc$addRidge = .addRidge
- }), list( .tol = tol, .addRidge = addRidge,
- .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
- .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ initialize = eval(substitute(expression({
+ eval(process.binomial2.data.vgam)
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
+ predictors.names = c(
+ namesof("mu12", .lmu12, earg = .emu12, short = TRUE),
+ namesof("phi12", .lphi12, earg = .ephi12, short = TRUE),
+ namesof("oratio", .loratio, earg = .eoratio, short = TRUE))
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ propY1.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'01'], w)
+ propY2.eq.0 = weighted.mean(y[,'00'], w) + weighted.mean(y[,'10'], w)
+ if (length( .iphi12) && any( .iphi12 > propY1.eq.0))
+ warning("iphi12 must be less than the sample proportion of Y1==0")
+ if (length( .iphi12) && any( .iphi12 > propY2.eq.0))
+ warning("iphi12 must be less than the sample proportion of Y2==0")
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("zipebcom"),
- deriv = eval(substitute(expression({
- A1vec = eta2theta(eta[,1], .lmu12, earg = .emu12)
- smallno = .Machine$double.eps^(2/4)
- A1vec[A1vec > 1.0 -smallno] = 1.0 - smallno
-
- phivec = eta2theta(eta[,2], .lphi12, earg = .ephi12)
- pmargin = matrix((1 - phivec) * A1vec, n, 2)
- oratio = eta2theta(eta[,3], .loratio, earg = .eoratio)
-
- Vab = 1 / (1/mu[,1] + 1/mu[,2] + 1/mu[,3] + 1/mu[,4])
- Vabc = 1/mu[,1] + 1/mu[,2]
- denom3 = 2 * oratio * mu[,2] + mu[,1] + mu[,4]
- temp1 = oratio * mu[,2] + mu[,4]
- dp11star.dp1unstar = 2*(1-phivec)*Vab * Vabc
- dp11star.dphi1 = -2 * A1vec * Vab * Vabc
- dp11star.doratio = Vab / oratio
- yandmu = (y[,1]/mu[,1] - y[,2]/mu[,2] - y[,3]/mu[,3] +
- y[,4]/mu[,4])
- dp11.doratio = Vab / oratio
- check.dl.doratio = yandmu * dp11.doratio
-
- cyandmu = (y[,2]+y[,3])/mu[,2] - 2 * y[,1]/mu[,1]
- dl.dmu1 = dp11star.dp1unstar * yandmu + (1-phivec) * cyandmu
- dl.dphi1 = dp11star.dphi1 * yandmu - A1vec * cyandmu
- dl.doratio = check.dl.doratio
- dthetas.detas =
- cbind(dtheta.deta(A1vec, .lmu12, earg = .emu12),
- dtheta.deta(phivec, .lphi12, earg = .ephi12),
- dtheta.deta(oratio, .loratio, earg = .eoratio))
- c(w) * cbind(dl.dmu1,
- dl.dphi1,
- dl.doratio) * dthetas.detas
- }), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
- .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, 4)
- alternwz11 = 2*(1-phivec)^2 *(2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
- (dthetas.detas[,1])^2
- wz[,iam(1,1,M)] = alternwz11
-
- alternwz22 = 2* A1vec^2 *(2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
- (dthetas.detas[,2])^2
- wz[,iam(2,2,M)] = alternwz22
-
- alternwz12 = -2*A1vec*(1-phivec)*
- (2/mu[,1] + 1/mu[,2] - 2*Vab*Vabc^2) *
- dthetas.detas[,1] * dthetas.detas[,2]
- wz[,iam(1,2,M)] = alternwz12
-
- alternwz33 = (Vab / oratio^2) * dthetas.detas[,3]^2
- wz[,iam(3,3,M)] = alternwz33
-
- wz[,1:2] = wz[,1:2] * (1 + .addRidge)
- c(w) * wz
- }), list( .addRidge = addRidge ))))
+ if (!length(etastart)) {
+ pstar.init = ((mu[, 3]+mu[, 4]) + (mu[, 2]+mu[, 4])) / 2
+ phi.init = if (length(.iphi12)) rep(.iphi12, len = n) else
+ min(propY1.eq.0 * 0.95, propY2.eq.0 * 0.95, pstar.init/1.5)
+ oratio.init = if (length( .ioratio)) rep( .ioratio, len = n) else
+ mu[, 4]*mu[, 1]/(mu[, 2]*mu[, 3])
+ mu12.init = if (length(.imu12)) rep(.imu12, len = n) else
+ pstar.init / (1-phi.init)
+
+ etastart = cbind(
+ theta2eta(mu12.init, .lmu12, earg = .emu12),
+ theta2eta(phi.init, .lphi12, earg = .ephi12),
+ theta2eta(oratio.init, .loratio, earg = .eoratio))
+ }
+ }), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
+ .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio,
+ .imu12 = imu12, .iphi12 = iphi12, .ioratio = ioratio ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ A1vec = eta2theta(eta[, 1], .lmu12, earg = .emu12)
+ phivec = eta2theta(eta[, 2], .lphi12, earg = .ephi12)
+ pmargin = matrix((1 - phivec) * A1vec, nrow(eta), 2)
+ oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
+ a.temp = 1 + (pmargin[, 1]+pmargin[, 2])*(oratio-1)
+ b.temp = -4 * oratio * (oratio-1) * pmargin[, 1] * pmargin[, 2]
+ temp = sqrt(a.temp^2 + b.temp)
+ pj4 = ifelse(abs(oratio-1) < .tol, pmargin[, 1]*pmargin[, 2],
+ (a.temp-temp)/(2*(oratio-1)))
+ pj2 = pmargin[, 2] - pj4
+ pj3 = pmargin[, 1] - pj4
+ cbind("00" = 1-pj4-pj2-pj3, "01" = pj2, "10" = pj3, "11" = pj4)
+ }, list( .tol = tol,
+ .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
+ .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
+ last = eval(substitute(expression({
+ misc$link = c("mu12"= .lmu12 , "phi12" = .lphi12,
+ "oratio" = .loratio)
+ misc$earg = list("mu12"= .emu12 , "phi12" = .ephi12,
+ "oratio" = .eoratio)
+
+ misc$tol = .tol
+ misc$expected = TRUE
+ misc$addRidge = .addRidge
+ }), list( .tol = tol, .addRidge = addRidge,
+ .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
+ .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("zipebcom"),
+ deriv = eval(substitute(expression({
+ A1vec = eta2theta(eta[, 1], .lmu12, earg = .emu12)
+ smallno = .Machine$double.eps^(2/4)
+ A1vec[A1vec > 1.0 -smallno] = 1.0 - smallno
+
+ phivec = eta2theta(eta[, 2], .lphi12, earg = .ephi12)
+ pmargin = matrix((1 - phivec) * A1vec, n, 2)
+ oratio = eta2theta(eta[, 3], .loratio, earg = .eoratio)
+
+ Vab = 1 / (1/mu[, 1] + 1/mu[, 2] + 1/mu[, 3] + 1/mu[, 4])
+ Vabc = 1/mu[, 1] + 1/mu[, 2]
+ denom3 = 2 * oratio * mu[, 2] + mu[, 1] + mu[, 4]
+ temp1 = oratio * mu[, 2] + mu[, 4]
+ dp11star.dp1unstar = 2*(1-phivec)*Vab * Vabc
+ dp11star.dphi1 = -2 * A1vec * Vab * Vabc
+ dp11star.doratio = Vab / oratio
+ yandmu = (y[, 1]/mu[, 1] - y[, 2]/mu[, 2] - y[, 3]/mu[, 3] +
+ y[, 4]/mu[, 4])
+ dp11.doratio = Vab / oratio
+ check.dl.doratio = yandmu * dp11.doratio
+
+ cyandmu = (y[, 2]+y[, 3])/mu[, 2] - 2 * y[, 1]/mu[, 1]
+ dl.dmu1 = dp11star.dp1unstar * yandmu + (1-phivec) * cyandmu
+ dl.dphi1 = dp11star.dphi1 * yandmu - A1vec * cyandmu
+ dl.doratio = check.dl.doratio
+ dthetas.detas =
+ cbind(dtheta.deta(A1vec, .lmu12, earg = .emu12),
+ dtheta.deta(phivec, .lphi12, earg = .ephi12),
+ dtheta.deta(oratio, .loratio, earg = .eoratio))
+ c(w) * cbind(dl.dmu1,
+ dl.dphi1,
+ dl.doratio) * dthetas.detas
+ }), list( .lmu12 = lmu12, .lphi12 = lphi12, .loratio = loratio,
+ .emu12 = emu12, .ephi12 = ephi12, .eoratio = eoratio ))),
+ weight = eval(substitute(expression({
+ wz = matrix(0, n, 4)
+ alternwz11 = 2 * (1-phivec)^2 *
+ (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[, 1])^2
+ wz[, iam(1, 1, M)] = alternwz11
+
+ alternwz22 = 2* A1vec^2 *(2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ (dthetas.detas[, 2])^2
+ wz[, iam(2, 2, M)] = alternwz22
+
+ alternwz12 = -2*A1vec*(1-phivec)*
+ (2/mu[, 1] + 1/mu[, 2] - 2*Vab*Vabc^2) *
+ dthetas.detas[, 1] * dthetas.detas[, 2]
+ wz[, iam(1, 2, M)] = alternwz12
+
+ alternwz33 = (Vab / oratio^2) * dthetas.detas[, 3]^2
+ wz[, iam(3, 3, M)] = alternwz33
+
+ wz[, 1:2] = wz[, 1:2] * (1 + .addRidge)
+ c(w) * wz
+ }), list( .addRidge = addRidge ))))
}
@@ -1945,15 +2032,19 @@ if (FALSE)
lusted68 <- function(lrhopos = "loge", lrhoneg = "loge",
erhopos = list(), erhoneg = list(),
irhopos = NULL, irhoneg = NULL,
- iprob1 = NULL, iprob2 = NULL, zero = NULL)
+ iprob1 = NULL, iprob2 = NULL,
+ zero = NULL)
{
print("hi 20100603")
- if (mode(lrhopos) != "character" && mode(lrhopos) != "name")
- lrhopos = as.character(substitute(lrhopos))
- if (mode(lrhoneg) != "character" && mode(lrhoneg) != "name")
- lrhoneg = as.character(substitute(lrhoneg))
- if (!is.list(erhopos)) erhopos = list()
- if (!is.list(erhoneg)) erhoneg = list()
+
+ lrhopos <- as.list(substitute(lrhopos))
+ erhopos <- link2list(lrhopos)
+ lrhopos <- attr(erhopos, "function.name")
+
+ lrhoneg <- as.list(substitute(lrhoneg))
+ erhoneg <- link2list(lrhoneg)
+ lrhoneg <- attr(erhoneg, "function.name")
+
new("vglmff",
blurb = c("Lusted (1968)'s model\n",
@@ -1974,43 +2065,43 @@ if (FALSE)
- predictors.names = c(
- namesof("rhopos", .lrhopos, earg = .erhopos, short = TRUE),
- namesof("rhoneg", .lrhoneg, earg = .erhoneg, short = TRUE))
+ predictors.names = c(
+ namesof("rhopos", .lrhopos, earg = .erhopos, short = TRUE),
+ namesof("rhoneg", .lrhoneg, earg = .erhoneg, short = TRUE))
- if (!length(etastart)) {
- nnn1 = round(w * (y[, 1] + y[, 2]))
- nnn2 = round(w * (y[, 3] + y[, 4]))
+ if (!length(etastart)) {
+ nnn1 = round(w * (y[, 1] + y[, 2]))
+ nnn2 = round(w * (y[, 3] + y[, 4]))
print("head(nnn1, 3)")
print( head(nnn1, 3) )
print("head(nnn2, 3)")
print( head(nnn2, 3) )
- init.pee1 = if (length( .iprob1 )) rep( .iprob1 , len = n) else
- mu[, 1] / (mu[, 1] + mu[, 2])
- init.pee2 = if (length( .iprob2 )) rep( .iprob2 , len = n) else
- mu[, 3] / (mu[, 3] + mu[, 4])
- init.rhopos = pmax(1.1, init.pee1 / init.pee2) # Should be > 1
- init.rhoneg = pmin(0.4, (1 - init.pee1) / (1 - init.pee2)) # c. 0
+ init.pee1 = if (length( .iprob1 )) rep( .iprob1 , len = n) else
+ mu[, 1] / (mu[, 1] + mu[, 2])
+ init.pee2 = if (length( .iprob2 )) rep( .iprob2 , len = n) else
+ mu[, 3] / (mu[, 3] + mu[, 4])
+ init.rhopos = pmax(1.1, init.pee1 / init.pee2) # Should be > 1
+ init.rhoneg = pmin(0.4, (1 - init.pee1) / (1 - init.pee2)) # c. 0
print("head(init.rhopos, 3)")
print( head(init.rhopos, 3) )
print("head(init.rhoneg, 3)")
print( head(init.rhoneg, 3) )
- if (length( .irhopos)) init.rhopos = rep( .irhopos , len = n)
- if (length( .irhoneg)) init.rhoneg = rep( .irhoneg , len = n)
- etastart = cbind(theta2eta(init.rhopos, .lrhopos, earg = .erhopos),
- theta2eta(init.rhoneg, .lrhoneg, earg = .erhoneg))
+ if (length( .irhopos)) init.rhopos = rep( .irhopos , len = n)
+ if (length( .irhoneg)) init.rhoneg = rep( .irhoneg , len = n)
+ etastart = cbind(theta2eta(init.rhopos, .lrhopos, earg = .erhopos),
+ theta2eta(init.rhoneg, .lrhoneg, earg = .erhoneg))
print("etastart[1:3,]")
print( etastart[1:3,] )
- }
- }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
+ }
+ }), list( .lrhopos = lrhopos, .lrhoneg = lrhoneg,
.erhopos = erhopos, .erhoneg = erhoneg,
.iprob1 = iprob1, .iprob2 = iprob2,
.irhopos = irhopos, .irhoneg = irhoneg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- rhopos = eta2theta(eta[,1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[,2], .lrhoneg, earg = .erhoneg)
+ rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
pee2 = (1 - rhoneg) / (rhopos - rhoneg)
pee1 = pee2 * rhopos
cbind(rhopos, rhoneg, "mu1" = pee1, "mu2" = pee2)
@@ -2025,8 +2116,8 @@ if (FALSE)
.irhopos = irhopos, .irhoneg = irhoneg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rhopos = eta2theta(eta[,1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[,2], .lrhoneg, earg = .erhoneg)
+ rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
pee2 = (1 - rhoneg) / (rhopos - rhoneg)
pee1 = pee2 * rhopos
if (min(pee1) <= 0.5) {
@@ -2077,8 +2168,8 @@ if (FALSE)
.irhopos = irhopos, .irhoneg = irhoneg ))),
vfamily = c("lusted68", "binom2"),
deriv = eval(substitute(expression({
- rhopos = eta2theta(eta[,1], .lrhopos, earg = .erhopos)
- rhoneg = eta2theta(eta[,2], .lrhoneg, earg = .erhoneg)
+ rhopos = eta2theta(eta[, 1], .lrhopos, earg = .erhopos)
+ rhoneg = eta2theta(eta[, 2], .lrhoneg, earg = .erhoneg)
pee2 = (1 - rhoneg) / (rhopos - rhoneg)
pee1 = pee2 * rhopos
nnn1 = round(w * (y[, 1] + y[, 3]))
@@ -2136,7 +2227,7 @@ if (FALSE)
- binom2.Rho = function(rho = 0, imu1 = NULL, imu2 = NULL,
+ binom2.Rho <- function(rho = 0, imu1 = NULL, imu2 = NULL,
exchangeable = FALSE, nsimEIM = NULL)
{
lmu12 = "probit"
@@ -2171,22 +2262,22 @@ if (FALSE)
if (is.null(etastart)) {
mu1.init= if (is.Numeric(.imu1))
rep(.imu1, length = n) else
- mu[,3] + mu[,4]
+ mu[, 3] + mu[, 4]
mu2.init= if (is.Numeric(.imu2))
rep(.imu2, length = n) else
- mu[,2] + mu[,4]
+ mu[, 2] + mu[, 4]
etastart = cbind(theta2eta(mu1.init, .lmu12, earg = .emu12),
theta2eta(mu2.init, .lmu12, earg = .emu12))
}
}), list( .lmu12 = lmu12, .emu12 = emu12, .nsimEIM = nsimEIM,
.imu1 = imu1, .imu2 = imu2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
- eta2theta(eta[,2], .lmu12, earg = .emu12))
+ pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
+ eta2theta(eta[, 2], .lmu12, earg = .emu12))
rhovec = rep( .rho , len = nrow(eta))
- p11 = pnorm2(eta[,1], eta[,2], rhovec)
- p01 = pmin(pmargin[,2] - p11, pmargin[,2])
- p10 = pmin(pmargin[,1] - p11, pmargin[,1])
+ p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
+ p01 = pmin(pmargin[, 2] - p11, pmargin[, 2])
+ p10 = pmin(pmargin[, 1] - p11, pmargin[, 1])
p00 = 1 - p01 - p10 - p11
ansmat = abs(cbind("00"=p00, "01"=p01, "10"=p10, "11"=p11))
ansmat / rowSums(ansmat)
@@ -2220,69 +2311,74 @@ if (FALSE)
}, list( .rho = rho ))),
vfamily = c("binom2.Rho", "binom2"),
deriv = eval(substitute(expression({
- pmargin = cbind(eta2theta(eta[,1], .lmu12, earg = .emu12),
- eta2theta(eta[,2], .lmu12, earg = .emu12))
+ pmargin = cbind(eta2theta(eta[, 1], .lmu12, earg = .emu12),
+ eta2theta(eta[, 2], .lmu12, earg = .emu12))
rhovec = rep( .rho , len = nrow(eta))
- p11 = pnorm2(eta[,1], eta[,2], rhovec)
- p01 = pmargin[,2]-p11
- p10 = pmargin[,1]-p11
+ p11 = pnorm2(eta[, 1], eta[, 2], rhovec)
+ p01 = pmargin[, 2]-p11
+ p10 = pmargin[, 1]-p11
p00 = 1-p01-p10-p11
- ABmat = (eta[,1:2] - rhovec*eta[,2:1]) / sqrt(1-rhovec^2)
- PhiA = pnorm(ABmat[,1])
- PhiB = pnorm(ABmat[,2])
- onemPhiA = pnorm(ABmat[,1], lower.tail = FALSE)
- onemPhiB = pnorm(ABmat[,2], lower.tail = FALSE)
-
- smallno = 1000 * .Machine$double.eps
- p00[p00 < smallno] = smallno
- p01[p01 < smallno] = smallno
- p10[p10 < smallno] = smallno
- p11[p11 < smallno] = smallno
-
- dprob00 = dnorm2(eta[,1], eta[,2], rhovec)
- dl.dprob1 = PhiB*(y[,4]/p11-y[,2]/p01) + onemPhiB*(y[,3]/p10-y[,1]/p00)
- dl.dprob2 = PhiA*(y[,4]/p11-y[,3]/p10) + onemPhiA*(y[,2]/p01-y[,1]/p00)
- dprob1.deta = dtheta.deta(pmargin[,1], .lmu12, earg = .emu12)
- dprob2.deta = dtheta.deta(pmargin[,2], .lmu12, earg = .emu12)
- dthetas.detas = cbind(dprob1.deta, dprob2.deta)
-
- c(w) * cbind(dl.dprob1, dl.dprob2) * dthetas.detas
- }), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
- weight = eval(substitute(expression({
- if (is.null( .nsimEIM)) {
- d2l.dprob1prob1 = PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
- d2l.dprob2prob2 = PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
- d2l.dprob1prob2 = PhiA * (PhiB/p11 - onemPhiB/p10) +
- onemPhiA * (onemPhiB/p00 - PhiB/p01)
- wz = matrix(0, n, dimm(M)) # 6=dimm(M)
- wz[,iam(1,1,M)] = d2l.dprob1prob1 * dprob1.deta^2
- wz[,iam(2,2,M)] = d2l.dprob2prob2 * dprob2.deta^2
- wz[,iam(1,2,M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
- } else {
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- for (ii in 1:( .nsimEIM )) {
- ysim = rbinom2.rho(n = n, mu1=pmargin[,1], mu2=pmargin[,2],
- twoCols = FALSE, rho=rhovec)
- dl.dprob1 = PhiB * (ysim[,4]/p11-ysim[,2]/p01) +
- onemPhiB * (ysim[,3]/p10-ysim[,1]/p00)
- dl.dprob2 = PhiA * (ysim[,4]/p11-ysim[,3]/p10) +
- onemPhiA * (ysim[,2]/p01-ysim[,1]/p00)
-
- rm(ysim)
- temp3 = cbind(dl.dprob1, dl.dprob2)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index] * temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- }
- c(w) * wz
- }), list( .nsimEIM = nsimEIM ))))
+ ABmat = (eta[, 1:2] - rhovec*eta[, 2:1]) / sqrt(1-rhovec^2)
+ PhiA = pnorm(ABmat[, 1])
+ PhiB = pnorm(ABmat[, 2])
+ onemPhiA = pnorm(ABmat[, 1], lower.tail = FALSE)
+ onemPhiB = pnorm(ABmat[, 2], lower.tail = FALSE)
+
+ smallno = 1000 * .Machine$double.eps
+ p00[p00 < smallno] = smallno
+ p01[p01 < smallno] = smallno
+ p10[p10 < smallno] = smallno
+ p11[p11 < smallno] = smallno
+
+ dprob00 = dnorm2(eta[, 1], eta[, 2], rhovec)
+ dl.dprob1 = PhiB*(y[, 4]/p11-y[, 2]/p01) +
+ onemPhiB*(y[, 3]/p10-y[, 1]/p00)
+ dl.dprob2 = PhiA*(y[, 4]/p11-y[, 3]/p10) +
+ onemPhiA*(y[, 2]/p01-y[, 1]/p00)
+ dprob1.deta = dtheta.deta(pmargin[, 1], .lmu12, earg = .emu12)
+ dprob2.deta = dtheta.deta(pmargin[, 2], .lmu12, earg = .emu12)
+ dthetas.detas = cbind(dprob1.deta, dprob2.deta)
+
+ c(w) * cbind(dl.dprob1, dl.dprob2) * dthetas.detas
+ }), list( .lmu12 = lmu12, .emu12 = emu12, .rho = rho ))),
+ weight = eval(substitute(expression({
+ if (is.null( .nsimEIM)) {
+ d2l.dprob1prob1 = PhiB^2 *(1/p11+1/p01) + onemPhiB^2 *(1/p10+1/p00)
+ d2l.dprob2prob2 = PhiA^2 *(1/p11+1/p10) + onemPhiA^2 *(1/p01+1/p00)
+ d2l.dprob1prob2 = PhiA * (PhiB/p11 - onemPhiB/p10) +
+ onemPhiA * (onemPhiB/p00 - PhiB/p01)
+ wz = matrix(0, n, dimm(M)) # 6=dimm(M)
+ wz[, iam(1, 1, M)] = d2l.dprob1prob1 * dprob1.deta^2
+ wz[, iam(2, 2, M)] = d2l.dprob2prob2 * dprob2.deta^2
+ wz[, iam(1, 2, M)] = d2l.dprob1prob2 * dprob1.deta * dprob2.deta
+ } else {
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ for (ii in 1:( .nsimEIM )) {
+ ysim = rbinom2.rho(n = n, mu1 = pmargin[, 1],
+ mu2 = pmargin[, 2],
+ twoCols = FALSE, rho = rhovec)
+ dl.dprob1 = PhiB * (ysim[, 4]/p11-ysim[, 2]/p01) +
+ onemPhiB * (ysim[, 3]/p10-ysim[, 1]/p00)
+ dl.dprob2 = PhiA * (ysim[, 4]/p11-ysim[, 3]/p10) +
+ onemPhiA * (ysim[, 2]/p01-ysim[, 1]/p00)
+
+ rm(ysim)
+ temp3 = cbind(dl.dprob1, dl.dprob2)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
+ }
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
}
diff --git a/R/family.bivariate.R b/R/family.bivariate.R
index 46d10b0..19b8473 100644
--- a/R/family.bivariate.R
+++ b/R/family.bivariate.R
@@ -15,139 +15,186 @@
bilogistic4.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- bilogistic4 = function(llocation = "identity",
- lscale = "loge",
- iloc1 = NULL, iscale1 = NULL,
- iloc2 = NULL, iscale2 = NULL,
- imethod = 1, zero = NULL) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2) stop("imethod must be 1 or 2")
-
- new("vglmff",
- blurb = c("Bivariate logistic distribution\n\n",
- "Link: ",
- namesof("location1", llocation), ", ",
- namesof("scale1", lscale), ", ",
- namesof("location2", llocation), ", ",
- namesof("scale2", lscale),
- "\n", "\n",
- "Means: location1, location2"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero))),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2-column matrix")
-
- predictors.names = c(namesof("location1", .llocation, tag= FALSE),
- namesof("scale1", .lscale, tag= FALSE),
- namesof("location2", .llocation, tag= FALSE),
- namesof("scale2", .lscale, tag= FALSE))
-
- if (!length(etastart)) {
- if ( .imethod == 1) {
- location.init1 = y[, 1]
- scale.init1 = sqrt(3) * sd(y[, 1]) / pi
- location.init2 = y[, 2]
- scale.init2 = sqrt(3) * sd(y[, 2]) / pi
- } else {
- location.init1 = median(rep(y[, 1], w))
- location.init2 = median(rep(y[, 2], w))
- scale.init1=sqrt(3)*sum(w*(y[, 1]-location.init1)^2)/(sum(w)*pi)
- scale.init2=sqrt(3)*sum(w*(y[, 2]-location.init2)^2)/(sum(w)*pi)
- }
- loc1.init = if (length(.iloc1)) rep(.iloc1, length.out = n) else
- rep(location.init1, length.out = n)
- loc2.init = if (length(.iloc2)) rep(.iloc2, length.out = n) else
- rep(location.init2, length.out = n)
- scale1.init = if (length(.iscale1)) rep(.iscale1, length.out = n) else
- rep(1, length.out = n)
- scale2.init = if (length(.iscale2)) rep(.iscale2, length.out = n) else
- rep(1, length.out = n)
- if (.llocation == "loge") location.init1 = abs(location.init1) + 0.001
- if (.llocation == "loge") location.init2 = abs(location.init2) + 0.001
- etastart = cbind(theta2eta(location.init1, .llocation),
- theta2eta(scale1.init, .lscale),
- theta2eta(location.init2, .llocation),
- theta2eta(scale2.init, .lscale))
- }
- }), list(.imethod = imethod, .iloc1=iloc1, .iloc2=iloc2,
- .llocation=llocation,
- .iscale1=iscale1, .iscale2=iscale2, .lscale=lscale))),
- linkinv = function(eta, extra = NULL) {
- cbind(eta[, 1], eta[, 2])
- },
- last = eval(substitute(expression({
- misc$link = c(location1= .llocation, scale1= .lscale,
- location2= .llocation, scale2= .lscale)
- misc$expected = FALSE
- misc$BFGS = TRUE
- }), list(.lscale=lscale, .llocation=llocation))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- loc1 = eta2theta(eta[, 1], .llocation)
- Scale1 = eta2theta(eta[, 2], .lscale)
- loc2 = eta2theta(eta[, 3], .llocation)
- Scale2 = eta2theta(eta[, 4], .lscale)
- zedd1 = (y[, 1]-loc1) / Scale1
- zedd2 = (y[, 2]-loc2) / Scale2
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-zedd1 - zedd2 - 3 * log1p(exp(-zedd1)+exp(-zedd2)) -
- log(Scale1) - log(Scale2)))
- }, list(.lscale=lscale, .llocation=llocation))),
- vfamily = c("bilogistic4"),
- deriv = eval(substitute(expression({
- loc1 = eta2theta(eta[, 1], .llocation)
- Scale1 = eta2theta(eta[, 2], .lscale)
- loc2 = eta2theta(eta[, 3], .llocation)
- Scale2 = eta2theta(eta[, 4], .lscale)
- zedd1 = (y[, 1]-loc1) / Scale1
- zedd2 = (y[, 2]-loc2) / Scale2
- ezedd1 = exp(-zedd1)
- ezedd2 = exp(-zedd2)
- denom = 1 + ezedd1 + ezedd2
- dl.dloc1 = (1 - 3 * ezedd1 / denom) / Scale1
- dl.dloc2 = (1 - 3 * ezedd2 / denom) / Scale2
- dl.dscale1 = (zedd1 - 1 - 3 * ezedd1 * zedd1 / denom) / Scale1
- dl.dscale2 = (zedd2 - 1 - 3 * ezedd2 * zedd2 / denom) / Scale2
- dloc1.deta = dtheta.deta(loc1, .llocation)
- dloc2.deta = dtheta.deta(loc2, .llocation)
- dscale1.deta = dtheta.deta(Scale1, .lscale)
- dscale2.deta = dtheta.deta(Scale2, .lscale)
- if (iter == 1) {
- etanew = eta
- } else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
- }
- derivnew = c(w) * cbind(dl.dloc1 * dloc1.deta,
- dl.dscale1 * dscale1.deta,
- dl.dloc2 * dloc2.deta,
- dl.dscale2 * dscale2.deta)
- derivnew
- }), list(.lscale=lscale, .llocation=llocation))),
- weight = eval(substitute(expression({
- if (iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ bilogistic4 <- function(llocation = "identity",
+ lscale = "loge",
+ iloc1 = NULL, iscale1 = NULL,
+ iloc2 = NULL, iscale2 = NULL,
+ imethod = 1, zero = NULL) {
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2) stop("imethod must be 1 or 2")
+
+ new("vglmff",
+ blurb = c("Bivariate logistic distribution\n\n",
+ "Link: ",
+ namesof("location1", llocat, elocat), ", ",
+ namesof("scale1", lscale, escale), ", ",
+ namesof("location2", llocat, elocat), ", ",
+ namesof("scale2", lscale, escale),
+ "\n", "\n",
+ "Means: location1, location2"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ c(namesof("location1", .llocat, .elocat , tag = FALSE),
+ namesof("scale1", .lscale, .escale , tag = FALSE),
+ namesof("location2", .llocat, .elocat , tag = FALSE),
+ namesof("scale2", .lscale, .escale , tag = FALSE))
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init1 = y[, 1]
+ scale.init1 = sqrt(3) * sd(y[, 1]) / pi
+ locat.init2 = y[, 2]
+ scale.init2 = sqrt(3) * sd(y[, 2]) / pi
} else {
- wzold = wznew
- wznew = qnupdate(w=w, wzold=wzold, dderiv=(derivold - derivnew),
- deta=etanew-etaold, M=M,
- trace=trace) # weights incorporated in args
+ locat.init1 = median(rep(y[, 1], w))
+ locat.init2 = median(rep(y[, 2], w))
+ const4 = sqrt(3) / (sum(w) * pi)
+ scale.init1 = const4 * sum(c(w) *(y[, 1] - locat.init1)^2)
+ scale.init2 = const4 * sum(c(w) *(y[, 2] - locat.init2)^2)
}
- wznew
- }), list(.lscale=lscale, .llocation=llocation))))
+ loc1.init = if (length( .iloc1 ))
+ rep( .iloc1, length.out = n) else
+ rep(locat.init1, length.out = n)
+ loc2.init = if (length( .iloc2 ))
+ rep( .iloc2, length.out = n) else
+ rep(locat.init2, length.out = n)
+ scale1.init = if (length( .iscale1 ))
+ rep( .iscale1, length.out = n) else
+ rep(1, length.out = n)
+ scale2.init = if (length( .iscale2 ))
+ rep( .iscale2, length.out = n) else
+ rep(1, length.out = n)
+
+ if ( .llocat == "loge")
+ locat.init1 = abs(locat.init1) + 0.001
+ if ( .llocat == "loge")
+ locat.init2 = abs(locat.init2) + 0.001
+
+ etastart = cbind(theta2eta(locat.init1, .llocat , .elocat ),
+ theta2eta(scale1.init, .lscale , .escale ),
+ theta2eta(locat.init2, .llocat , .elocat ),
+ theta2eta(scale2.init, .lscale , .escale ))
+ }
+ }), list(.imethod = imethod,
+ .iloc1 = iloc1, .iloc2 = iloc2,
+ .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .iscale1 = iscale1, .iscale2 = iscale2))),
+ linkinv = function(eta, extra = NULL) {
+ cbind(eta[, 1], eta[, 2])
+ },
+ last = eval(substitute(expression({
+ misc$link = c(location1 = .llocat, scale1 = .lscale,
+ location2 = .llocat, scale2 = .lscale)
+
+ misc$earg = list(location1 = .elocat, scale1 = .escale,
+ location2 = .elocat, scale2 = .escale)
+
+ misc$expected = FALSE
+
+ misc$BFGS = TRUE
+ misc$multipleResponses <- FALSE
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat1 = eta2theta(eta[, 1], .llocat , .elocat )
+ Scale1 = eta2theta(eta[, 2], .lscale , .escale )
+ locat2 = eta2theta(eta[, 3], .llocat , .elocat )
+ Scale2 = eta2theta(eta[, 4], .lscale , .escale )
+
+ zedd1 = (y[, 1]-locat1) / Scale1
+ zedd2 = (y[, 2]-locat2) / Scale2
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (-zedd1 - zedd2 - 3 * log1p(exp(-zedd1)+exp(-zedd2)) -
+ log(Scale1) - log(Scale2)))
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ vfamily = c("bilogistic4"),
+ deriv = eval(substitute(expression({
+ locat1 = eta2theta(eta[, 1], .llocat , .elocat )
+ Scale1 = eta2theta(eta[, 2], .lscale , .escale )
+ locat2 = eta2theta(eta[, 3], .llocat , .elocat )
+ Scale2 = eta2theta(eta[, 4], .lscale , .escale )
+
+ zedd1 = (y[, 1]-locat1) / Scale1
+ zedd2 = (y[, 2]-locat2) / Scale2
+ ezedd1 = exp(-zedd1)
+ ezedd2 = exp(-zedd2)
+ denom = 1 + ezedd1 + ezedd2
+
+ dl.dlocat1 = (1 - 3 * ezedd1 / denom) / Scale1
+ dl.dlocat2 = (1 - 3 * ezedd2 / denom) / Scale2
+ dl.dscale1 = (zedd1 - 1 - 3 * ezedd1 * zedd1 / denom) / Scale1
+ dl.dscale2 = (zedd2 - 1 - 3 * ezedd2 * zedd2 / denom) / Scale2
+
+ dlocat1.deta = dtheta.deta(locat1, .llocat , .elocat )
+ dlocat2.deta = dtheta.deta(locat2, .llocat , .elocat )
+ dscale1.deta = dtheta.deta(Scale1, .lscale , .escale )
+ dscale2.deta = dtheta.deta(Scale2, .lscale , .escale )
+
+ if (iter == 1) {
+ etanew = eta
+ } else {
+ derivold = derivnew
+ etaold = etanew
+ etanew = eta
+ }
+ derivnew = c(w) * cbind(dl.dlocat1 * dlocat1.deta,
+ dl.dscale1 * dscale1.deta,
+ dl.dlocat2 * dlocat2.deta,
+ dl.dscale2 * dscale2.deta)
+ derivnew
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ weight = eval(substitute(expression({
+ if (iter == 1) {
+ wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
+ } else {
+ wzold = wznew
+ wznew = qnupdate(w = w, wzold=wzold, dderiv=(derivold - derivnew),
+ deta=etanew-etaold, M = M,
+ trace=trace) # weights incorporated in args
+ }
+ wznew
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .llocat = llocat))))
}
@@ -155,11 +202,13 @@ bilogistic4.control <- function(save.weight = TRUE, ...)
-dbilogis4 = function(x1, x2, loc1 = 0, scale1 = 1,
- loc2 = 0, scale2 = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dbilogis4 <- function(x1, x2, loc1 = 0, scale1 = 1,
+ loc2 = 0, scale2 = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
L = max(length(x1), length(x2), length(loc1), length(loc2),
@@ -176,7 +225,7 @@ dbilogis4 = function(x1, x2, loc1 = 0, scale1 = 1,
-pbilogis4 = function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
+pbilogis4 <- function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
if (!is.Numeric(scale1, positive = TRUE)) stop("bad input for 'scale1'")
@@ -188,7 +237,7 @@ pbilogis4 = function(q1, q2, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
-rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
+rbilogis4 <- function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
if (!is.Numeric(n, positive = TRUE,
allowable.length = 1,integer.valued = TRUE))
stop("bad input for 'n'")
@@ -204,30 +253,31 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- freund61 = function(la = "loge",
- lap = "loge",
- lb = "loge",
- lbp = "loge",
- ea = list(),
- eap = list(),
- eb = list(),
- ebp = list(),
- ia = NULL, iap = NULL, ib = NULL, ibp = NULL,
- independent = FALSE,
- zero = NULL) {
- if (mode(la) != "character" && mode(la) != "name")
- la = as.character(substitute(la))
- if (mode(lap) != "character" && mode(lap) != "name")
- lap = as.character(substitute(lap))
- if (mode(lb) != "character" && mode(lb) != "name")
- lb = as.character(substitute(lb))
- if (mode(lbp) != "character" && mode(lbp) != "name")
- lbp = as.character(substitute(lbp))
-
- if (!is.list(ea )) ea = list()
- if (!is.list(eap)) eap = list()
- if (!is.list(eb )) eb = list()
- if (!is.list(ebp)) ebp = list()
+ freund61 <- function(la = "loge",
+ lap = "loge",
+ lb = "loge",
+ lbp = "loge",
+ ia = NULL, iap = NULL, ib = NULL, ibp = NULL,
+ independent = FALSE,
+ zero = NULL) {
+ la <- as.list(substitute(la))
+ ea <- link2list(la)
+ la <- attr(ea, "function.name")
+
+ lap <- as.list(substitute(lap))
+ eap <- link2list(lap)
+ lap <- attr(eap, "function.name")
+
+ lb <- as.list(substitute(lb))
+ eb <- link2list(lb)
+ lb <- attr(eb, "function.name")
+
+
+ lbp <- as.list(substitute(lbp))
+ ebp <- link2list(lbp)
+ lbp <- attr(ebp, "function.name")
+
+
new("vglmff",
blurb = c("Freund (1961) bivariate exponential distribution\n",
@@ -243,22 +293,35 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list(.independent = independent, .zero = zero))),
initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- predictors.names =
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
c(namesof("a", .la, earg = .ea , short = TRUE),
namesof("ap", .lap, earg = .eap, short = TRUE),
namesof("b", .lb, earg = .eb , short = TRUE),
namesof("bp", .lbp, earg = .ebp, short = TRUE))
extra$y1.lt.y2 = y[, 1] < y[, 2]
- if (!(arr <- sum(extra$y1.lt.y2)) || arr==n)
+ if (!(arr <- sum(extra$y1.lt.y2)) || arr == n)
stop("identifiability problem: either all y1<y2 or y2<y1")
if (!length(etastart)) {
- sumx = sum(y[extra$y1.lt.y2, 1]); sumxp = sum(y[!extra$y1.lt.y2, 1])
- sumy = sum(y[extra$y1.lt.y2, 2]); sumyp = sum(y[!extra$y1.lt.y2, 2])
+ sumx = sum(y[ extra$y1.lt.y2, 1]);
+ sumxp = sum(y[!extra$y1.lt.y2, 1])
+ sumy = sum(y[ extra$y1.lt.y2, 2]);
+ sumyp = sum(y[!extra$y1.lt.y2, 2])
+
if (FALSE) { # Noise:
arr = min(arr + n/10, n*0.95)
sumx = sumx * 1.1; sumxp = sumxp * 1.2;
@@ -279,7 +342,7 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
theta2eta(rep(binit, length.out = n), .lb, earg = .eb ),
theta2eta(rep(bpinit, length.out = n), .lbp, earg = .ebp ))
}
- }), list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ }), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
.ea = ea, .eap = eap, .eb = eb, .ebp = ebp,
.ia = ia, .iap = iap, .ib = ib, .ibp = ibp))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -287,13 +350,17 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
alphap = eta2theta(eta[, 2], .lap, earg = .eap )
beta = eta2theta(eta[, 3], .lb, earg = .eb )
betap = eta2theta(eta[, 4], .lbp, earg = .ebp )
- cbind((alphap+beta) / (alphap*(alpha+beta)),
- (alpha+betap) / (betap*(alpha+beta)))
- }, list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
- .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
+ cbind((alphap + beta) / (alphap * (alpha + beta)),
+ (alpha + betap) / (betap * (alpha + beta)))
+ }, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
last = eval(substitute(expression({
- misc$link = c("a"= .la, "ap"= .lap, "b"= .lb, "bp"= .lbp)
- }), list(.la = la, .lap = lap, .lb = lb, .lbp = lbp))),
+ misc$link = c("a" = .la, "ap" = .lap, "b" = .lb, "bp" = .lbp)
+ misc$earg = list("a" = .ea, "ap" = .eap, "b" = .eb, "bp" = .ebp)
+
+ misc$multipleResponses <- FALSE
+ }), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
alpha = eta2theta(eta[, 1], .la, earg = .ea )
@@ -310,8 +377,8 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
alphap[!tmp88] * y[!tmp88, 1] -
(alpha+beta-alphap)[!tmp88] * y[!tmp88, 2]
sum(w[tmp88] * ell1) + sum(w[!tmp88] * ell2) }
- }, list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
- .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
+ }, list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
vfamily = c("freund61"),
deriv = eval(substitute(expression({
tmp88 = extra$y1.lt.y2
@@ -338,22 +405,24 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
d2 * dalphap.deta,
d3 * dbeta.deta,
d4 * dbetap.deta)
- }), list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
- .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
+ }), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))),
weight = eval(substitute(expression({
py1.lt.y2 = alpha / (alpha+beta)
d11 = py1.lt.y2 / alpha^2
d22 = (1-py1.lt.y2) / alphap^2
d33 = (1-py1.lt.y2) / beta^2
d44 = py1.lt.y2 / betap^2
- wz = matrix(0, n, M) # diagonal
+
+ wz = matrix(0, n, M) # diagonal
wz[, iam(1, 1, M)] = dalpha.deta^2 * d11
wz[, iam(2, 2, M)] = dalphap.deta^2 * d22
wz[, iam(3, 3, M)] = dbeta.deta^2 * d33
wz[, iam(4, 4, M)] = dbetap.deta^2 * d44
+
c(w) * wz
- }), list(.la = la, .lap = lap, .lb = lb, .lbp = lbp,
- .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))))
+ }), list( .la = la, .lap = lap, .lb = lb, .lbp = lbp,
+ .ea = ea, .eap = eap, .eb = eb, .ebp = ebp ))))
}
@@ -363,145 +432,195 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
- bivgamma.mckay = function(lscale = "loge",
- lshape1 = "loge",
- lshape2 = "loge",
- iscale = NULL,
- ishape1 = NULL,
- ishape2 = NULL,
- imethod = 1,
- zero = 1) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 = as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 = as.character(substitute(lshape2))
- if (!is.null(iscale))
- if (!is.Numeric(iscale, positive = TRUE))
- stop("'iscale' must be positive or NULL")
- if (!is.null(ishape1))
- if (!is.Numeric(ishape1, positive = TRUE))
- stop("'ishape1' must be positive or NULL")
- if (!is.null(ishape2))
- if (!is.Numeric(ishape2, positive = TRUE))
- stop("'ishape2' must be positive or NULL")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
-
- new("vglmff",
- blurb = c("Bivariate gamma: McKay's distribution\n",
- "Links: ",
- namesof("scale", lscale), ", ",
- namesof("shape1", lshape1), ", ",
- namesof("shape2", lshape2)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y[, 1] >= y[, 2]))
- stop("the second column minus the first column must be a vector ",
- "of positive values")
- predictors.names = c(namesof("scale", .lscale, short = TRUE),
- namesof("shape1", .lshape1, short = TRUE),
- namesof("shape2", .lshape2, short = TRUE))
- if (!length(etastart)) {
- momentsY = if ( .imethod == 1) {
- cbind(median(y[, 1]), # This may not be monotonic
- median(y[, 2])) + 0.01
- } else {
- cbind(weighted.mean(y[, 1], w),
- weighted.mean(y[, 2], w))
- }
+ bivgamma.mckay <- function(lscale = "loge",
+ lshape1 = "loge",
+ lshape2 = "loge",
+ iscale = NULL,
+ ishape1 = NULL,
+ ishape2 = NULL,
+ imethod = 1,
+ zero = 1) {
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- mcg2.loglik = function(thetaval, y, x, w, extraargs) {
- ainit = a = thetaval
- momentsY = extraargs$momentsY
- p = (1/a) * abs(momentsY[1]) + 0.01
- q = (1/a) * abs(momentsY[2] - momentsY[1]) + 0.01
- sum(w * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
- (p - 1)*log(y[, 1]) + (q - 1)*log(y[, 2]-y[, 1]) - y[, 2] / a ))
- }
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+
+ if (!is.null(iscale))
+ if (!is.Numeric(iscale, positive = TRUE))
+ stop("'iscale' must be positive or NULL")
+ if (!is.null(ishape1))
+ if (!is.Numeric(ishape1, positive = TRUE))
+ stop("'ishape1' must be positive or NULL")
+ if (!is.null(ishape2))
+ if (!is.Numeric(ishape2, positive = TRUE))
+ stop("'ishape2' must be positive or NULL")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+
+ new("vglmff",
+ blurb = c("Bivariate gamma: McKay's distribution\n",
+ "Links: ",
+ namesof("scale", lscale), ", ",
+ namesof("shape1", lshape1), ", ",
+ namesof("shape2", lshape2)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
- a.grid = if (length( .iscale )) c( .iscale ) else
- c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
- extraargs = list(momentsY = momentsY)
- ainit = getMaxMin(a.grid, objfun=mcg2.loglik,
- y=y, x=x, w=w, maximize = TRUE,
- extraargs = extraargs)
- ainit = rep(if(is.Numeric( .iscale )) .iscale else ainit, length.out = n)
- pinit = (1/ainit) * abs(momentsY[1]) + 0.01
- qinit = (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
- pinit = rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit, length.out = n)
- qinit = rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit, length.out = n)
+ if (any(y[, 1] >= y[, 2]))
+ stop("the second column minus the first column must be a vector ",
+ "of positive values")
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale, .escale, short = TRUE),
+ namesof("shape1", .lshape1, .eshape1, short = TRUE),
+ namesof("shape2", .lshape2, .eshape2, short = TRUE))
+
+ if (!length(etastart)) {
+ momentsY = if ( .imethod == 1) {
+ cbind(median(y[, 1]), # This may not be monotonic
+ median(y[, 2])) + 0.01
+ } else {
+ cbind(weighted.mean(y[, 1], w),
+ weighted.mean(y[, 2], w))
+ }
+
+ mcg2.loglik <- function(thetaval, y, x, w, extraargs) {
+ ainit = a = thetaval
+ momentsY = extraargs$momentsY
+ p = (1/a) * abs(momentsY[1]) + 0.01
+ q = (1/a) * abs(momentsY[2] - momentsY[1]) + 0.01
+ sum(c(w) * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
+ (p - 1)*log(y[, 1]) +
+ (q - 1)*log(y[, 2]-y[, 1]) - y[, 2] / a ))
+ }
+
+ a.grid = if (length( .iscale )) c( .iscale ) else
+ c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100)
+ extraargs = list(momentsY = momentsY)
+ ainit = getMaxMin(a.grid, objfun=mcg2.loglik,
+ y=y, x=x, w = w, maximize = TRUE,
+ extraargs = extraargs)
+ ainit = rep(if(is.Numeric( .iscale )) .iscale else ainit,
+ length.out = n)
+ pinit = (1/ainit) * abs(momentsY[1]) + 0.01
+ qinit = (1/ainit) * abs(momentsY[2] - momentsY[1]) + 0.01
+
+ pinit = rep(if(is.Numeric( .ishape1 )) .ishape1 else pinit,
+ length.out = n)
+ qinit = rep(if(is.Numeric( .ishape2 )) .ishape2 else qinit,
+ length.out = n)
etastart = cbind(theta2eta(ainit, .lscale),
theta2eta(pinit, .lshape1),
theta2eta(qinit, .lshape2))
}
- }), list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2,
- .iscale=iscale, .ishape1=ishape1, .ishape2=ishape2,
+ }), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2,
+ .iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
.imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- a = eta2theta(eta[, 1], .lscale)
- p = eta2theta(eta[, 2], .lshape1)
- q = eta2theta(eta[, 3], .lshape2)
- cbind("y1"=p*a, "y2"=(p+q)*a)
- }, list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2 ))),
- last = eval(substitute(expression({
- misc$link = c("scale"= .lscale, "shape1"= .lshape1, "shape2"= .lshape2)
- misc$ishape1 = .ishape1
- misc$ishape2 = .ishape2
- misc$iscale = .iscale
- misc$expected = TRUE
- }), list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2,
- .iscale=iscale, .ishape1=ishape1, .ishape2=ishape2 ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- a = eta2theta(eta[, 1], .lscale)
- p = eta2theta(eta[, 2], .lshape1)
- q = eta2theta(eta[, 3], .lshape2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
- (p - 1)*log(y[, 1]) + (q - 1)*log(y[, 2]-y[, 1]) - y[, 2] / a))
- }, list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2 ))),
- vfamily = c("bivgamma.mckay"),
- deriv = eval(substitute(expression({
- aparam = eta2theta(eta[, 1], .lscale)
- shape1 = eta2theta(eta[, 2], .lshape1)
- shape2 = eta2theta(eta[, 3], .lshape2)
- dl.da = (-(shape1+shape2) + y[, 2] / aparam) / aparam
- dl.dshape1 = -log(aparam) - digamma(shape1) + log(y[, 1])
- dl.dshape2 = -log(aparam) - digamma(shape2) + log(y[, 2]-y[, 1])
- c(w) * cbind(dl.da * dtheta.deta(aparam, .lscale),
- dl.dshape1 * dtheta.deta(shape1, .lshape1),
- dl.dshape2 * dtheta.deta(shape2, .lshape2))
- }), list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2 ))),
- weight = eval(substitute(expression({
- d11 = (shape1+shape2) / aparam^2
- d22 = trigamma(shape1)
- d33 = trigamma(shape2)
- d12 = 1 / aparam
- d13 = 1 / aparam
- d23 = 0
- wz = matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dtheta.deta(aparam, .lscale)^2 * d11
- wz[, iam(2, 2, M)] = dtheta.deta(shape1, .lshape1)^2 * d22
- wz[, iam(3, 3, M)] = dtheta.deta(shape2, .lshape2)^2 * d33
- wz[, iam(1, 2, M)] = dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape1, .lshape1) * d12
- wz[, iam(1, 3, M)] = dtheta.deta(aparam, .lscale) *
- dtheta.deta(shape2, .lshape2) * d13
- wz[, iam(2, 3, M)] = dtheta.deta(shape1, .lshape1) *
- dtheta.deta(shape2, .lshape2) * d23
- c(w) * wz
- }), list( .lscale=lscale, .lshape1=lshape1, .lshape2=lshape2 ))))
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ a = eta2theta(eta[, 1], .lscale , .escale )
+ p = eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ q = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+ cbind("y1" = p*a,
+ "y2" = (p+q)*a)
+ }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ last = eval(substitute(expression({
+ misc$link = c("scale" = .lscale ,
+ "shape1" = .lshape1 ,
+ "shape2" = .lshape2 )
+ misc$earg = list("scale" = .escale ,
+ "shape1" = .eshape1 ,
+ "shape2" = .eshape2 )
+
+ misc$ishape1 = .ishape1
+ misc$ishape2 = .ishape2
+ misc$iscale = .iscale
+ misc$expected = TRUE
+ misc$multipleResponses <- FALSE
+ }), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2,
+ .iscale = iscale, .ishape1 = ishape1, .ishape2 = ishape2,
+ .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ a = eta2theta(eta[, 1], .lscale , .escale )
+ p = eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ q = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (-(p+q)*log(a) - lgamma(p) - lgamma(q) +
+ (p - 1)*log(y[, 1]) + (q - 1)*log(y[, 2]-y[, 1]) -
+ y[, 2] / a))
+ }, list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = c("bivgamma.mckay"),
+ deriv = eval(substitute(expression({
+ aparam = eta2theta(eta[, 1], .lscale , .escale )
+ shape1 = eta2theta(eta[, 2], .lshape1 , .eshape1 )
+ shape2 = eta2theta(eta[, 3], .lshape2 , .eshape2 )
+
+ dl.da = (-(shape1+shape2) + y[, 2] / aparam) / aparam
+ dl.dshape1 = -log(aparam) - digamma(shape1) + log(y[, 1])
+ dl.dshape2 = -log(aparam) - digamma(shape2) + log(y[, 2]-y[, 1])
+
+ c(w) * cbind(dl.da * dtheta.deta(aparam, .lscale),
+ dl.dshape1 * dtheta.deta(shape1, .lshape1),
+ dl.dshape2 * dtheta.deta(shape2, .lshape2))
+ }), list( .lscale = lscale, .lshape1 = lshape1, .lshape2 = lshape2,
+ .escale = escale, .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ weight = eval(substitute(expression({
+ d11 = (shape1+shape2) / aparam^2
+ d22 = trigamma(shape1)
+ d33 = trigamma(shape2)
+ d12 = 1 / aparam
+ d13 = 1 / aparam
+ d23 = 0
+
+ wz = matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] = dtheta.deta(aparam, .lscale)^2 * d11
+ wz[, iam(2, 2, M)] = dtheta.deta(shape1, .lshape1)^2 * d22
+ wz[, iam(3, 3, M)] = dtheta.deta(shape2, .lshape2)^2 * d33
+ wz[, iam(1, 2, M)] = dtheta.deta(aparam, .lscale) *
+ dtheta.deta(shape1, .lshape1) * d12
+ wz[, iam(1, 3, M)] = dtheta.deta(aparam, .lscale) *
+ dtheta.deta(shape2, .lshape2) * d13
+ wz[, iam(2, 3, M)] = dtheta.deta(shape1, .lshape1) *
+ dtheta.deta(shape2, .lshape2) * d23
+
+ c(w) * wz
+ }), list( .lscale = lscale, .lshape1 = lshape1,
+ .lshape2 = lshape2 ))))
}
@@ -513,31 +632,32 @@ rbilogis4 = function(n, loc1 = 0, scale1 = 1, loc2 = 0, scale2 = 1) {
-rfrank = function(n, alpha) {
- if (!is.Numeric(n, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("bad input for argument 'n'")
- if (!is.Numeric(alpha, positive = TRUE))
- stop("bad input for argument 'alpha'")
- alpha = rep(alpha, length.out = n)
- U = runif(n)
- V = runif(n)
- T = alpha^U + (alpha - alpha^U) * V
- X = U
- index = abs(alpha - 1) < .Machine$double.eps
- Y = U
- if (any(!index))
- Y[!index] = logb(T[!index]/(T[!index]+(1-alpha[!index])*V[!index]),
- base=alpha[!index])
- ans = matrix(c(X,Y), nrow=n, ncol = 2)
- if (any(index)) {
- ans[index, 1] = runif(sum(index)) # Uniform density for alpha == 1
- ans[index, 2] = runif(sum(index))
- }
- ans
+rfrank <- function(n, alpha) {
+ if (!is.Numeric(n, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("bad input for argument 'n'")
+ if (!is.Numeric(alpha, positive = TRUE))
+ stop("bad input for argument 'alpha'")
+ alpha = rep(alpha, length.out = n)
+ U = runif(n)
+ V = runif(n)
+ T = alpha^U + (alpha - alpha^U) * V
+ X = U
+ index = abs(alpha - 1) < .Machine$double.eps
+ Y = U
+ if (any(!index))
+ Y[!index] = logb(T[!index]/(T[!index]+(1-alpha[!index])*V[!index]),
+ base = alpha[!index])
+ ans = matrix(c(X,Y), nrow = n, ncol = 2)
+ if (any(index)) {
+ ans[index, 1] = runif(sum(index)) # Uniform density for alpha == 1
+ ans[index, 2] = runif(sum(index))
+ }
+ ans
}
-pfrank = function(q1, q2, alpha) {
+
+pfrank <- function(q1, q2, alpha) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
if (!is.Numeric(alpha, positive = TRUE)) stop("bad input for 'alpha'")
@@ -565,10 +685,12 @@ pfrank = function(q1, q2, alpha) {
ans
}
-dfrank = function(x1, x2, alpha, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+
+dfrank <- function(x1, x2, alpha, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
if (!is.Numeric(x1)) stop("bad input for 'x1'")
if (!is.Numeric(x2)) stop("bad input for 'x2'")
@@ -601,103 +723,128 @@ dfrank = function(x1, x2, alpha, log = FALSE) {
frank.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- frank = function(lapar = "loge", eapar = list(), iapar = 2, nsimEIM = 250) {
- if (mode(lapar) != "character" && mode(lapar) != "name")
- lapar = as.character(substitute(lapar))
- if (!is.Numeric(iapar, positive = TRUE))
- stop("'iapar' must be positive")
-
- if (!is.list(eapar)) eapar = list()
- if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
-
- new("vglmff",
- blurb = c("Frank's bivariate distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = eapar )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y <= 0) || any(y >= 1))
- stop("the response must have values between 0 and 1")
- predictors.names =
- c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
- if (!length(etastart)) {
- apar.init = rep(.iapar, length.out = n)
- etastart = cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
- }
- }), list( .lapar = lapar, .eapar=eapar, .iapar=iapar))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- apar = eta2theta(eta, .lapar, earg = .eapar )
- fv.matrix = matrix(0.5, length(apar), 2)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
- fv.matrix
- }, list(.lapar = lapar, .eapar=eapar ))),
- last = eval(substitute(expression({
- misc$link = c("apar"= .lapar)
- misc$earg = list("apar"= .eapar )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$pooled.weight = pooled.weight
- }), list(.lapar = lapar, .eapar=eapar, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- apar = eta2theta(eta, .lapar, earg = .eapar )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dfrank(x1=y[, 1], x2=y[, 2], alpha=apar, log = TRUE))
- }
- }, list(.lapar = lapar, .eapar=eapar ))),
- vfamily = c("frank"),
- deriv = eval(substitute(expression({
- apar = eta2theta(eta, .lapar, earg = .eapar )
- dapar.deta = dtheta.deta(apar, .lapar, earg = .eapar )
-
- de3 = deriv3(~ (log((apar - 1) * log(apar)) + (y1+y2)*log(apar) -
- 2 * log(apar-1 + (apar^y1 - 1) * (apar^y2 - 1))),
- name = "apar", hessian= TRUE)
-
- denom = apar-1 + (apar^y[, 1] - 1) * (apar^y[, 2] - 1)
- tmp700 = 2*apar^(y[, 1]+y[, 2]) - apar^y[, 1] - apar^y[, 2]
- numerator = 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2] - 1) +
- y[, 2] * apar^(y[, 2] - 1) * (apar^y[, 1] - 1)
- Dl.dapar = 1/(apar - 1) + 1/(apar*log(apar)) + (y[, 1]+y[, 2])/apar -
- 2 * numerator / denom
- w * Dl.dapar * dapar.deta
- }), list(.lapar = lapar, .eapar=eapar, .nsimEIM = nsimEIM ))),
- weight = eval(substitute(expression({
- if ( is.Numeric( .nsimEIM)) {
-
- pooled.weight = FALSE # For @last
-
-
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rfrank(n,alpha=apar)
- y1 = ysim[, 1]; y2 = ysim[, 2];
- eval.de3 = eval(de3)
- d2l.dthetas2 = attr(eval.de3, "hessian")
- rm(ysim)
- temp3 = -d2l.dthetas2[, 1, 1] # M = 1
- run.mean = ((ii - 1) * run.mean + temp3) / ii
- }
- wz = if (intercept.only)
- matrix(mean(run.mean), n, dimm(M)) else run.mean
+ frank <- function(lapar = "loge", iapar = 2, nsimEIM = 250) {
- wz = wz * dapar.deta^2
- c(w) * wz
- } else {
+ lapar <- as.list(substitute(lapar))
+ eapar <- link2list(lapar)
+ lapar <- attr(eapar, "function.name")
+
+
+ if (!is.Numeric(iapar, positive = TRUE))
+ stop("'iapar' must be positive")
+
+
+ if (length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
+
+
+ new("vglmff",
+ blurb = c("Frank's bivariate distribution\n",
+ "Links: ",
+ namesof("apar", lapar, earg = eapar )),
+ initialize = eval(substitute(expression({
+
+ if (any(y <= 0) || any(y >= 1))
+ stop("the response must have values between 0 and 1")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("apar", .lapar, earg = .eapar, short = TRUE))
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ if (!length(etastart)) {
+ apar.init = rep(.iapar, length.out = n)
+ etastart = cbind(theta2eta(apar.init, .lapar, earg = .eapar ))
+ }
+ }), list( .lapar = lapar, .eapar = eapar, .iapar = iapar))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ apar = eta2theta(eta, .lapar, earg = .eapar )
+ fv.matrix = matrix(0.5, length(apar), 2)
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ fv.matrix
+ }, list( .lapar = lapar, .eapar = eapar ))),
+ last = eval(substitute(expression({
+ misc$link = c("apar" = .lapar )
+
+ misc$earg = list("apar" = .eapar )
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$pooled.weight = pooled.weight
+ misc$multipleResponses <- FALSE
+ }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ apar = eta2theta(eta, .lapar, earg = .eapar )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dfrank(x1 = y[, 1], x2 = y[, 2],
+ alpha = apar, log = TRUE))
+ }
+ }, list( .lapar = lapar, .eapar = eapar ))),
+ vfamily = c("frank"),
+ deriv = eval(substitute(expression({
+ apar = eta2theta(eta, .lapar, earg = .eapar )
+ dapar.deta = dtheta.deta(apar, .lapar, earg = .eapar )
+
+ de3 = deriv3(~ (log((apar - 1) * log(apar)) + (y1+y2)*log(apar) -
+ 2 * log(apar-1 + (apar^y1 - 1) * (apar^y2 - 1))),
+ name = "apar", hessian = TRUE)
+
+ denom = apar-1 + (apar^y[, 1] - 1) * (apar^y[, 2] - 1)
+ tmp700 = 2*apar^(y[, 1]+y[, 2]) - apar^y[, 1] - apar^y[, 2]
+ numerator = 1 + y[, 1] * apar^(y[, 1] - 1) * (apar^y[, 2] - 1) +
+ y[, 2] * apar^(y[, 2] - 1) * (apar^y[, 1] - 1)
+ Dl.dapar = 1/(apar - 1) + 1/(apar*log(apar)) + (y[, 1]+y[, 2])/apar -
+ 2 * numerator / denom
+ c(w) * Dl.dapar * dapar.deta
+ }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))),
+ weight = eval(substitute(expression({
+ if ( is.Numeric( .nsimEIM)) {
+
+ pooled.weight = FALSE # For @last
+
+
+ run.mean = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rfrank(n,alpha=apar)
+ y1 = ysim[, 1]; y2 = ysim[, 2];
+ eval.de3 = eval(de3)
+ d2l.dthetas2 = attr(eval.de3, "hessian")
+ rm(ysim)
+ temp3 = -d2l.dthetas2[, 1, 1] # M = 1
+ run.mean = ((ii - 1) * run.mean + temp3) / ii
+ }
+ wz = if (intercept.only)
+ matrix(mean(run.mean), n, dimm(M)) else run.mean
+
+ wz = wz * dapar.deta^2
+ c(w) * wz
+ } else {
nump = apar^(y[, 1]+y[, 2]-2) * (2 * y[, 1] * y[, 2] +
y[, 1]*(y[, 1] - 1) + y[, 2]*(y[, 2] - 1)) -
y[, 1]*(y[, 1] - 1) * apar^(y[, 1]-2) -
@@ -706,186 +853,240 @@ frank.control <- function(save.weight = TRUE, ...)
(y[, 1]+y[, 2])/apar^2 + 2 *
(nump / denom - (numerator/denom)^2)
d2apar.deta2 = d2theta.deta2(apar, .lapar)
- wz = w * (dapar.deta^2 * D2l.dapar2 - Dl.dapar * d2apar.deta2)
+ wz = c(w) * (dapar.deta^2 * D2l.dapar2 - Dl.dapar * d2apar.deta2)
if (TRUE && intercept.only) {
wz = cbind(wz)
- sumw = sum(w)
- for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[,iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- wz
+ sumw = sum(w)
+ for(iii in 1:ncol(wz))
+ wz[,iii] = sum(wz[, iii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else {
+ pooled.weight = FALSE
}
- }), list( .lapar = lapar, .eapar=eapar, .nsimEIM = nsimEIM ))))
+ wz
+ }
+ }), list( .lapar = lapar, .eapar = eapar, .nsimEIM = nsimEIM ))))
}
- gammahyp = function(ltheta = "loge", itheta = NULL, expected = FALSE) {
- if (mode(ltheta) != "character" && mode(ltheta) != "name")
- ltheta = as.character(substitute(ltheta))
- if (!is.logical(expected) || length(expected) != 1)
- stop("argument 'expected' must be a single logical")
-
- new("vglmff",
- blurb = c("Gamma hyperbola bivariate distribution\n",
- "Links: ",
- namesof("theta", ltheta)),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y[, 1] <= 0) || any(y[, 2] <= 1))
- stop("the response has values that are out of range")
- predictors.names = c(namesof("theta", .ltheta, short = TRUE))
- if (!length(etastart)) {
- theta.init = if (length( .itheta)) rep(.itheta, length.out = n) else {
- 1 / (y[, 2] - 1 + 0.01)
- }
- etastart = cbind(theta2eta(theta.init, .ltheta))
- }
- }), list(.ltheta=ltheta, .itheta=itheta))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .ltheta)
- cbind(theta*exp(theta), 1+1/theta)
- }, list(.ltheta=ltheta))),
- last = eval(substitute(expression({
- misc$link = c("theta"= .ltheta)
- misc$expected = .expected
- }), list(.ltheta=ltheta, .expected=expected))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .ltheta)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * (-exp(-theta)*y[, 1]/theta - theta*y[, 2]))
- }
- }, list(.ltheta=ltheta))),
- vfamily = c("gammahyp"),
- deriv = eval(substitute(expression({
- theta = eta2theta(eta, .ltheta)
- Dl.dtheta = exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
- Dtheta.deta = dtheta.deta(theta, .ltheta)
- w * Dl.dtheta * Dtheta.deta
- }), list(.ltheta=ltheta))),
- weight = eval(substitute(expression({
- temp300 = 2 + theta * (2 + theta)
- if ( .expected) {
- D2l.dtheta2 = temp300 / theta^2
- wz = w * Dtheta.deta^2 * D2l.dtheta2
- } else {
- D2l.dtheta2 = temp300 * y[, 1] * exp(-theta) / theta^3
- D2theta.deta2 = d2theta.deta2(theta, .ltheta)
- wz = w * (Dtheta.deta^2 * D2l.dtheta2 - Dl.dtheta * D2theta.deta2)
- }
- wz
- }), list( .expected=expected, .ltheta=ltheta))))
+
+
+ gammahyp <- function(ltheta = "loge", itheta = NULL, expected = FALSE) {
+
+ ltheta <- as.list(substitute(ltheta))
+ etheta <- link2list(ltheta)
+ ltheta <- attr(etheta, "function.name")
+
+ if (!is.logical(expected) || length(expected) != 1)
+ stop("argument 'expected' must be a single logical")
+
+
+ new("vglmff",
+ blurb = c("Gamma hyperbola bivariate distribution\n",
+ "Links: ",
+ namesof("theta", ltheta, etheta)),
+ initialize = eval(substitute(expression({
+ if (any(y[, 1] <= 0) || any(y[, 2] <= 1))
+ stop("the response has values that are out of range")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("theta", .ltheta, .etheta , short = TRUE))
+
+ if (!length(etastart)) {
+ theta.init = if (length( .itheta)) {
+ rep( .itheta , length.out = n)
+ } else {
+ 1 / (y[, 2] - 1 + 0.01)
+ }
+ etastart =
+ cbind(theta2eta(theta.init, .ltheta , .etheta ))
+ }
+ }), list( .ltheta = ltheta, .etheta = etheta, .itheta = itheta))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ theta = eta2theta(eta, .ltheta , .etheta )
+ cbind(theta*exp(theta), 1+1/theta)
+ }, list( .ltheta = ltheta, .etheta = etheta ))),
+ last = eval(substitute(expression({
+ misc$link = c("theta" = .ltheta )
+ misc$earg = list("theta" = .etheta )
+
+ misc$expected = .expected
+ misc$multipleResponses <- FALSE
+ }), list( .ltheta = ltheta, .etheta = etheta, .expected = expected ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ theta = eta2theta(eta, .ltheta , .etheta )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * (-exp(-theta)*y[, 1]/theta - theta*y[, 2]))
+ }
+ }, list( .ltheta = ltheta, .etheta = etheta ))),
+ vfamily = c("gammahyp"),
+ deriv = eval(substitute(expression({
+ theta = eta2theta(eta, .ltheta , .etheta )
+ Dl.dtheta = exp(-theta) * y[, 1] * (1+theta) / theta^2 - y[, 2]
+ DTHETA.deta = dtheta.deta(theta, .ltheta , .etheta )
+ c(w) * Dl.dtheta * DTHETA.deta
+ }), list( .ltheta = ltheta, .etheta = etheta ))),
+ weight = eval(substitute(expression({
+ temp300 = 2 + theta * (2 + theta)
+ if ( .expected ) {
+ D2l.dtheta2 = temp300 / theta^2
+ wz = c(w) * DTHETA.deta^2 * D2l.dtheta2
+ } else {
+ D2l.dtheta2 = temp300 * y[, 1] * exp(-theta) / theta^3
+ D2theta.deta2 = d2theta.deta2(theta, .ltheta )
+ wz = c(w) * (DTHETA.deta^2 * D2l.dtheta2 - Dl.dtheta * D2theta.deta2)
+ }
+ wz
+ }), list( .expected = expected, .ltheta = ltheta, .etheta = etheta ))))
}
- morgenstern = function(lapar = "rhobit", earg = list(), iapar = NULL, tola0 = 0.01,
- imethod = 1) {
- if (mode(lapar) != "character" && mode(lapar) != "name")
- lapar = as.character(substitute(lapar))
- if (!is.list(earg)) earg = list()
+ morgenstern <- function(lapar = "rhobit",
+ iapar = NULL, tola0 = 0.01,
+ imethod = 1) {
+ lapar <- as.list(substitute(lapar))
+ earg <- link2list(lapar)
+ lapar <- attr(earg, "function.name")
- if (length(iapar) &&
- (!is.Numeric(iapar, allowable.length = 1) ||
- abs(iapar) >= 1))
- stop("argument 'iapar' must be a single number between -1 and 1")
+ if (length(iapar) &&
+ (!is.Numeric(iapar, allowable.length = 1) ||
+ abs(iapar) >= 1))
+ stop("argument 'iapar' must be a single number between -1 and 1")
- if (!is.Numeric(tola0, allowable.length = 1, positive = TRUE))
- stop("argument 'tola0' must be a single positive number")
- if (length(iapar) && abs(iapar) <= tola0)
- stop("argument 'iapar' must not be between -tola0 and tola0")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
-
- new("vglmff",
- blurb = c("Morgenstern's bivariate exponential distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = earg )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y < 0))
- stop("the response must have non-negative values only")
- predictors.names = c(namesof("apar", .lapar, earg = .earg , short = TRUE))
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
- if (!length(etastart)) {
- ainit = if (length(.iapar)) rep(.iapar, length.out = n) else {
- mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
- mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
- Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
- ((Finit+expm1(-mean1)+exp(-mean2)) / exp(-mean1-mean2) - 1)/(
- expm1(-mean1) * expm1(-mean2))
- }
- etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
- }
- }), list( .iapar=iapar, .lapar = lapar, .earg = earg,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- fv.matrix = matrix(1, length(alpha), 2)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
- fv.matrix
- }, list( .lapar = lapar, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c("apar"= .lapar)
- misc$earg = list(apar = .earg)
- misc$expected = FALSE
- misc$pooled.weight = pooled.weight
- }), list( .lapar = lapar, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- alpha[abs(alpha) < .tola0 ] = .tola0
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
- 4*alpha*exp(-y[, 1] - y[, 2]))
- sum(w * (-y[, 1] - y[, 2] + log(denom)))
- }
- }, list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
- vfamily = c("morgenstern"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
- alpha[abs(alpha) < .tola0 ] = .tola0
- numerator = 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) + 4*exp(-y[, 1] - y[, 2])
- denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
- 4 *alpha*exp(-y[, 1] - y[, 2]))
- dl.dalpha = numerator / denom
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
- c(w) * cbind(dl.dalpha * dalpha.deta)
- }), list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
- weight = eval(substitute(expression({
- d2l.dalpha2 = dl.dalpha^2
- d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
- wz = w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
- if (TRUE &&
- intercept.only) {
- wz = cbind(wz)
- sumw = sum(w)
- for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[,iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- wz
- }), list( .lapar = lapar, .earg = earg ))))
+ if (!is.Numeric(tola0, allowable.length = 1, positive = TRUE))
+ stop("argument 'tola0' must be a single positive number")
+
+ if (length(iapar) && abs(iapar) <= tola0)
+ stop("argument 'iapar' must not be between -tola0 and tola0")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Morgenstern's bivariate exponential distribution\n",
+ "Links: ",
+ namesof("apar", lapar, earg = earg )),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("apar", .lapar, earg = .earg , short = TRUE))
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ if (!length(etastart)) {
+ ainit = if (length(.iapar)) rep( .iapar , length.out = n) else {
+ mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
+ mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
+ Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
+ ((Finit+expm1(-mean1)+exp(-mean2)) / exp(-mean1-mean2) - 1) / (
+ expm1(-mean1) * expm1(-mean2))
+ }
+ etastart =
+ theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
+ }
+ }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ fv.matrix = matrix(1, length(alpha), 2)
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ fv.matrix
+ }, list( .lapar = lapar, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c("apar" = .lapar )
+
+ misc$earg = list(apar = .earg )
+
+ misc$expected = FALSE
+ misc$pooled.weight = pooled.weight
+ misc$multipleResponses <- FALSE
+ }), list( .lapar = lapar, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha[abs(alpha) < .tola0 ] = .tola0
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4*alpha*exp(-y[, 1] - y[, 2]))
+ sum(c(w) * (-y[, 1] - y[, 2] + log(denom)))
+ }
+ }, list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
+ vfamily = c("morgenstern"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ alpha[abs(alpha) < .tola0 ] = .tola0
+ numerator = 1 - 2*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4*exp(-y[, 1] - y[, 2])
+ denom = (1 + alpha - 2*alpha*(exp(-y[, 1]) + exp(-y[, 2])) +
+ 4 *alpha*exp(-y[, 1] - y[, 2]))
+ dl.dalpha = numerator / denom
+
+ dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+
+ c(w) * cbind(dl.dalpha * dalpha.deta)
+ }), list( .lapar = lapar, .earg = earg, .tola0=tola0 ))),
+ weight = eval(substitute(expression({
+ d2l.dalpha2 = dl.dalpha^2
+ d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
+ wz = c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ if (TRUE &&
+ intercept.only) {
+ wz = cbind(wz)
+ sumw = sum(w)
+ for(iii in 1:ncol(wz))
+ wz[,iii] = sum(wz[, iii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else {
+ pooled.weight = FALSE
+ }
+ wz
+ }), list( .lapar = lapar, .earg = earg ))))
}
-rfgm = function(n, alpha) {
+rfgm <- function(n, alpha) {
if (!is.Numeric(n, positive = TRUE,
allowable.length = 1, integer.valued = TRUE))
stop("bad input for argument 'n'")
@@ -905,226 +1106,278 @@ rfgm = function(n, alpha) {
-dfgm = function(x1, x2, alpha, log = FALSE) {
- log.arg = log
- rm(log)
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
- if ( !is.logical( log.arg ) || length( log.arg ) != 1 )
- stop("bad input for argument 'log'")
-
- L = max(length(x1), length(x2), length(alpha))
- if (length(x1) != L) x1 = rep(x1, length.out = L)
- if (length(x2) != L) x2 = rep(x2, length.out = L)
- if (length(alpha) != L) alpha = rep(alpha, length.out = L)
- ans = 0 * x1
- xnok = (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
- if ( log.arg ) {
- ans[!xnok] = log1p(alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
- ans[xnok] = log(0)
- } else {
- ans[!xnok] = 1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
- ans[xnok] = 0
- if (any(ans<0))
- stop("negative values in the density (alpha out of range)")
- }
- ans
-}
+dfgm <- function(x1, x2, alpha, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
-pfgm = function(q1, q2, alpha) {
- if (!is.Numeric(q1)) stop("bad input for 'q1'")
- if (!is.Numeric(q2)) stop("bad input for 'q2'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
+ if (!is.Numeric(alpha))
+ stop("bad input for 'alpha'")
+ if (any(abs(alpha) > 1))
+ stop("'alpha' values out of range")
+ if ( !is.logical( log.arg ) ||
+ length( log.arg ) != 1 )
+ stop("bad input for argument 'log'")
+
+ L = max(length(x1), length(x2), length(alpha))
+ if (length(x1) != L) x1 = rep(x1, length.out = L)
+ if (length(x2) != L) x2 = rep(x2, length.out = L)
+ if (length(alpha) != L) alpha = rep(alpha, length.out = L)
+ ans = 0 * x1
+ xnok = (x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)
+ if ( log.arg ) {
+ ans[!xnok] = log1p(alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok]))
+ ans[xnok] = log(0)
+ } else {
+ ans[!xnok] = 1 + alpha[!xnok] * (1-2*x1[!xnok]) * (1-2*x2[!xnok])
+ ans[xnok] = 0
+ if (any(ans<0))
+ stop("negative values in the density (alpha out of range)")
+ }
+ ans
+}
- L = max(length(q1), length(q2), length(alpha))
- if (length(q1) != L) q1 = rep(q1, length.out = L)
- if (length(q2) != L) q2 = rep(q2, length.out = L)
- if (length(alpha) != L) alpha = rep(alpha, length.out = L)
- x=q1; y=q2
- index = (x >= 1 & y<1) | (y >= 1 & x<1) | (x <= 0 | y <= 0) | (x >= 1 & y >= 1)
- ans = as.numeric(index)
- if (any(!index)) {
- ans[!index] = q1[!index] * q2[!index] * (1 + alpha[!index] *
- (1-q1[!index])*(1-q2[!index]))
- }
- ans[x >= 1 & y<1] = y[x >= 1 & y<1] # P(Y2 < q2) = q2
- ans[y >= 1 & x<1] = x[y >= 1 & x<1] # P(Y1 < q1) = q1
- ans[x <= 0 | y <= 0] = 0
- ans[x >= 1 & y >= 1] = 1
- ans
+pfgm <- function(q1, q2, alpha) {
+ if (!is.Numeric(q1)) stop("bad input for 'q1'")
+ if (!is.Numeric(q2)) stop("bad input for 'q2'")
+ if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
+
+ L = max(length(q1), length(q2), length(alpha))
+ if (length(q1) != L) q1 = rep(q1, length.out = L)
+ if (length(q2) != L) q2 = rep(q2, length.out = L)
+ if (length(alpha) != L) alpha = rep(alpha, length.out = L)
+
+ x=q1; y=q2
+ index = (x >= 1 & y < 1) |
+ (y >= 1 & x < 1) |
+ (x <= 0 | y <= 0) |
+ (x >= 1 & y >= 1)
+ ans = as.numeric(index)
+ if (any(!index)) {
+ ans[!index] = q1[!index] * q2[!index] * (1 + alpha[!index] *
+ (1-q1[!index])*(1-q2[!index]))
+ }
+ ans[x >= 1 & y<1] = y[x >= 1 & y<1] # P(Y2 < q2) = q2
+ ans[y >= 1 & x<1] = x[y >= 1 & x<1] # P(Y1 < q1) = q1
+ ans[x <= 0 | y <= 0] = 0
+ ans[x >= 1 & y >= 1] = 1
+ ans
}
fgm.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- fgm = function(lapar = "rhobit", earg = list(), iapar = NULL,
- imethod = 1, nsimEIM = 200) {
- if (mode(lapar) != "character" && mode(lapar) != "name")
- lapar = as.character(substitute(lapar))
- if (!is.list(earg)) earg = list()
+ fgm <- function(lapar = "rhobit", iapar = NULL,
+ imethod = 1, nsimEIM = 200) {
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
- if (!length(nsimEIM) ||
- (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
- if (length(iapar) &&
- (abs(iapar) >= 1))
- stop("'iapar' should be less than 1 in absolute value")
-
-
- new("vglmff",
- blurb = c("Farlie-Gumbel-Morgenstern distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = earg )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y < 0) || any(y > 1))
- stop("the response must have values in the unit square")
- predictors.names = namesof("apar", .lapar, earg = .earg, short = TRUE)
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
- if (!length(etastart)) {
- ainit = if (length( .iapar )) .iapar else {
- mean1 = if ( .imethod == 1) weighted.mean(y[, 1],w) else
- median(y[, 1])
- mean2 = if ( .imethod == 1) weighted.mean(y[, 2],w) else
- median(y[, 2])
- Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
- (Finit / (mean1 * mean2) - 1) / ((1-mean1) * (1-mean2))
- }
+ lapar <- as.list(substitute(lapar))
+ earg <- link2list(lapar)
+ lapar <- attr(earg, "function.name")
- ainit = min(0.95, max(ainit, -0.95))
- etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
- }
- }), list( .iapar=iapar, .lapar = lapar, .earg = earg,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- fv.matrix = matrix(0.5, length(alpha), 2)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
- fv.matrix
- }, list( .lapar = lapar, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c("apar"= .lapar)
- misc$earg = list(apar = .earg)
- misc$expected = FALSE
- misc$nsimEIM = .nsimEIM
- }), list(.lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dfgm(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
- }
- }, list( .lapar = lapar, .earg = earg ))),
- vfamily = c("fgm"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
- numerator = (1 - 2 * y[, 1]) * (1 - 2 * y[, 2])
- denom = 1 + alpha * numerator
- mytolerance = .Machine$double.eps
- bad <- (denom <= mytolerance) # Range violation
- if (any(bad)) {
- cat("There are some range violations in @deriv\n")
- flush.console()
- denom[bad] = 2 * mytolerance
- }
- dl.dalpha = numerator / denom
- c(w) * cbind(dl.dalpha * dalpha.deta)
- }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
- weight = eval(substitute(expression({
- run.var = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rfgm(n, alpha=alpha)
- numerator = (1 - 2 * ysim[, 1]) * (1 - 2 * ysim[, 2])
- denom = 1 + alpha * numerator
- dl.dalpha = numerator / denom
- rm(ysim)
- temp3 = dl.dalpha
- run.var = ((ii - 1) * run.var + temp3^2) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow = TRUE) else cbind(run.var)
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+ if (!length(nsimEIM) ||
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
+ if (length(iapar) &&
+ (abs(iapar) >= 1))
+ stop("'iapar' should be less than 1 in absolute value")
+
+
+ new("vglmff",
+ blurb = c("Farlie-Gumbel-Morgenstern distribution\n",
+ "Links: ",
+ namesof("apar", lapar, earg = earg )),
+ initialize = eval(substitute(expression({
+ if (any(y < 0) || any(y > 1))
+ stop("the response must have values in the unit square")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ namesof("apar", .lapar, earg = .earg , short = TRUE)
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ if (!length(etastart)) {
+ ainit = if (length( .iapar )) .iapar else {
+ mean1 = if ( .imethod == 1) weighted.mean(y[, 1], w) else
+ median(y[, 1])
+ mean2 = if ( .imethod == 1) weighted.mean(y[, 2], w) else
+ median(y[, 2])
+ Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
+ (Finit / (mean1 * mean2) - 1) / ((1-mean1) * (1-mean2))
+ }
+
+ ainit = min(0.95, max(ainit, -0.95))
+
+ etastart =
+ theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
+ }
+ }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ fv.matrix = matrix(0.5, length(alpha), 2)
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ fv.matrix
+ }, list( .lapar = lapar, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c("apar" = .lapar )
+
+ misc$earg = list(apar = .earg )
+
+ misc$expected = FALSE
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dfgm(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
+ }
+ }, list( .lapar = lapar, .earg = earg ))),
+ vfamily = c("fgm"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+
+ dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+
+ numerator = (1 - 2 * y[, 1]) * (1 - 2 * y[, 2])
+ denom = 1 + alpha * numerator
+
+ mytolerance = .Machine$double.eps
+ bad <- (denom <= mytolerance) # Range violation
+ if (any(bad)) {
+ cat("There are some range violations in @deriv\n")
+ flush.console()
+ denom[bad] = 2 * mytolerance
+ }
+ dl.dalpha = numerator / denom
+ c(w) * cbind(dl.dalpha * dalpha.deta)
+ }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))),
+ weight = eval(substitute(expression({
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rfgm(n, alpha=alpha)
+ numerator = (1 - 2 * ysim[, 1]) * (1 - 2 * ysim[, 2])
+ denom = 1 + alpha * numerator
+ dl.dalpha = numerator / denom
+ rm(ysim)
+ temp3 = dl.dalpha
+ run.var = ((ii - 1) * run.var + temp3^2) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * dalpha.deta^2
- c(w) * wz
- }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))))
+ wz = wz * dalpha.deta^2
+ c(w) * wz
+ }), list( .lapar = lapar, .earg = earg, .nsimEIM = nsimEIM ))))
}
- gumbelIbiv = function(lapar = "identity", earg = list(),
- iapar = NULL, imethod = 1) {
- if (mode(lapar) != "character" && mode(lapar) != "name")
- lapar = as.character(substitute(lapar))
- if (!is.list(earg)) earg = list()
+ gumbelIbiv <- function(lapar = "identity", iapar = NULL, imethod = 1) {
- if (length(iapar) &&
- !is.Numeric(iapar, allowable.length = 1))
- stop("'iapar' must be a single number")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
+ lapar <- as.list(substitute(lapar))
+ earg <- link2list(lapar)
+ lapar <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("Gumbel's Type I bivariate distribution\n",
- "Links: ",
- namesof("apar", lapar, earg = earg )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y < 0))
- stop("the response must have non-negative values only")
- predictors.names = c(namesof("apar", .lapar, earg = .earg , short = TRUE))
- if (!length(etastart)) {
- ainit = if (length( .iapar )) rep( .iapar, length.out = n) else {
- mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
- mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
- Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
- (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
- }
- etastart = theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
- }
- }), list( .iapar=iapar, .lapar = lapar, .earg = earg,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- cbind(rep(1, len=length(alpha)),
- rep(1, len=length(alpha)))
- }, list( .lapar = lapar ))),
- last = eval(substitute(expression({
- misc$link = c("apar"= .lapar)
- misc$earg = list(apar = .earg)
- misc$expected = FALSE
- misc$pooled.weight = pooled.weight
- }), list( .lapar = lapar, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lapar, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
+
+ if (length(iapar) &&
+ !is.Numeric(iapar, allowable.length = 1))
+ stop("'iapar' must be a single number")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Gumbel's Type I bivariate distribution\n",
+ "Links: ",
+ namesof("apar", lapar, earg = earg )),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("apar", .lapar , earg = .earg , short = TRUE))
+
+ if (!length(etastart)) {
+ ainit = if (length( .iapar )) rep( .iapar, length.out = n) else {
+ mean1 = if ( .imethod == 1) median(y[, 1]) else mean(y[, 1])
+ mean2 = if ( .imethod == 1) median(y[, 2]) else mean(y[, 2])
+ Finit = 0.01 + mean(y[, 1] <= mean1 & y[, 2] <= mean2)
+ (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
+ }
+ etastart =
+ theta2eta(rep(ainit, length.out = n), .lapar, earg = .earg )
+ }
+ }), list( .iapar = iapar, .lapar = lapar, .earg = earg,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ cbind(rep(1, len = length(alpha)),
+ rep(1, len = length(alpha)))
+ }, list( .lapar = lapar, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c("apar" = .lapar )
+
+ misc$earg = list("apar" = .earg )
+
+ misc$expected = FALSE
+ misc$pooled.weight = pooled.weight
+ misc$multipleResponses <- FALSE
+ }), list( .lapar = lapar, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
mytolerance = .Machine$double.xmin
bad <- (denom <= mytolerance) # Range violation
if (any(bad)) {
@@ -1135,33 +1388,37 @@ fgm.control <- function(save.weight = TRUE, ...)
sum(w[!bad] * (-y[!bad, 1] - y[!bad, 2] +
alpha[!bad]*y[!bad, 1]*y[!bad, 2] + log(denom[!bad])))
}
- }, list( .lapar = lapar, .earg = earg ))),
- vfamily = c("gumbelIbiv"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lapar, earg = .earg )
- numerator = (alpha*y[, 1] - 1)*y[, 2] + (alpha*y[, 2] - 1)*y[, 1] + 1
- denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
- denom = abs(denom)
- dl.dalpha = numerator / denom + y[, 1]*y[, 2]
- dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
- c(w) * cbind(dl.dalpha * dalpha.deta)
- }), list( .lapar = lapar, .earg = earg ))),
- weight = eval(substitute(expression({
- d2l.dalpha2 = (numerator/denom)^2 - 2*y[, 1]*y[, 2] / denom
- d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
- wz = w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
- if (TRUE &&
+ }, list( .lapar = lapar, .earg = earg ))),
+ vfamily = c("gumbelIbiv"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .lapar, earg = .earg )
+ numerator = (alpha*y[, 1] - 1)*y[, 2] + (alpha*y[, 2] - 1)*y[, 1] + 1
+ denom = (alpha*y[, 1] - 1) * (alpha*y[, 2] - 1) + alpha
+ denom = abs(denom)
+
+ dl.dalpha = numerator / denom + y[, 1]*y[, 2]
+
+ dalpha.deta = dtheta.deta(alpha, .lapar, earg = .earg )
+
+ c(w) * cbind(dl.dalpha * dalpha.deta)
+ }), list( .lapar = lapar, .earg = earg ))),
+ weight = eval(substitute(expression({
+ d2l.dalpha2 = (numerator/denom)^2 - 2*y[, 1]*y[, 2] / denom
+ d2alpha.deta2 = d2theta.deta2(alpha, .lapar, earg = .earg )
+ wz = c(w) * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ if (TRUE &&
intercept.only) {
wz = cbind(wz)
- sumw = sum(w)
- for(iii in 1:ncol(wz))
- wz[,iii] = sum(wz[,iii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- wz
- }), list( .lapar = lapar, .earg = earg ))))
+ sumw = sum(w)
+ for(iii in 1:ncol(wz))
+ wz[, iii] = sum(wz[, iii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else {
+ pooled.weight = FALSE
+ }
+ wz
+ }), list( .lapar = lapar, .earg = earg ))))
}
@@ -1170,7 +1427,7 @@ fgm.control <- function(save.weight = TRUE, ...)
-pplack = function(q1, q2, oratio) {
+pplack <- function(q1, q2, oratio) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
if (!is.Numeric(oratio, positive = TRUE)) stop("bad input for 'oratio'")
@@ -1203,7 +1460,7 @@ pplack = function(q1, q2, oratio) {
-rplack = function(n, oratio) {
+rplack <- function(n, oratio) {
if (!is.Numeric(n, positive = TRUE,
allowable.length = 1, integer.valued = TRUE))
stop("bad input for 'n'")
@@ -1218,14 +1475,16 @@ rplack = function(n, oratio) {
(1 - 2 * V) *
sqrt(oratio * (oratio + 4*Z*y1*(1-y1)*(1-oratio)^2))) / (oratio +
Z*(1-oratio)^2)
- matrix(c(y1, 0.5 * y2), nrow=n, ncol = 2)
+ matrix(c(y1, 0.5 * y2), nrow = n, ncol = 2)
}
-dplack = function(x1, x2, oratio, log = FALSE) {
- log.arg = log
- rm(log)
+dplack <- function(x1, x2, oratio, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
if (!is.Numeric(oratio, positive = TRUE))
stop("bad input for 'oratio'")
@@ -1253,143 +1512,175 @@ dplack = function(x1, x2, oratio, log = FALSE) {
plackett.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- plackett = function(link = "loge", earg = list(),
- ioratio = NULL, imethod = 1, nsimEIM = 200) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (length(ioratio) && (!is.Numeric(ioratio, positive = TRUE)))
- stop("'ioratio' must be positive")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2) stop("imethod must be 1 or 2")
-
- new("vglmff",
- blurb = c("Plackett distribution\n",
- "Links: ",
- namesof("oratio", link, earg = earg )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y < 0) || any(y > 1))
- stop("the response must have values in the unit square")
- predictors.names = namesof("oratio", .link, earg = .earg, short = TRUE)
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
- if (!length(etastart)) {
- orinit = if (length( .ioratio )) .ioratio else {
- if ( .imethod == 2) {
- scorp = cor(y)[1, 2]
- if (abs(scorp) <= 0.1) 1 else
- if (abs(scorp) <= 0.3) 3^sign(scorp) else
- if (abs(scorp) <= 0.6) 5^sign(scorp) else
- if (abs(scorp) <= 0.8) 20^sign(scorp) else 40^sign(scorp)
- } else {
- y10 = weighted.mean(y[, 1], w)
- y20 = weighted.mean(y[, 2], w)
- (0.5 + sum(w[(y[, 1] < y10) & (y[, 2] < y20)])) *
- (0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] >= y20)])) / (
- ((0.5 + sum(w[(y[, 1] < y10) & (y[, 2] >= y20)])) *
- (0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] < y20)]))))
- }
- }
- etastart = theta2eta(rep(orinit, length.out = n), .link, earg = .earg)
- }
- }), list( .ioratio=ioratio, .link = link, .earg = earg,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- oratio = eta2theta(eta, .link, earg = .earg )
- fv.matrix = matrix(0.5, length(oratio), 2)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
- fv.matrix
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c("oratio"= .link)
- misc$earg = list(oratio = .earg)
- misc$expected = FALSE
- misc$nsimEIM = .nsimEIM
- }), list( .link = link, .earg = earg,
- .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- oratio = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dplack(x1= y[, 1], x2= y[, 2], oratio=oratio, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("plackett"),
- deriv = eval(substitute(expression({
- oratio = eta2theta(eta, .link, earg = .earg )
- doratio.deta = dtheta.deta(oratio, .link, earg = .earg )
- y1 = y[, 1]
- y2 = y[, 2]
- de3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
- (y1+y2-2*y1*y2)) - 1.5 *
- log((1 + (y1+y2)*(oratio - 1))^2 - 4 * oratio * (oratio - 1)*y1*y2)),
- name = "oratio", hessian= FALSE)
- eval.de3 = eval(de3)
- dl.doratio = attr(eval.de3, "gradient")
- w * dl.doratio * doratio.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- sd3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
- (y1sim+y2sim-2*y1sim*y2sim)) - 1.5 *
- log((1 + (y1sim+y2sim)*(oratio - 1))^2 -
- 4 * oratio * (oratio - 1)*y1sim*y2sim)),
- name = "oratio", hessian= FALSE)
- run.var = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rplack(n, oratio=oratio)
- y1sim = ysim[, 1]
- y2sim = ysim[, 1]
- eval.sd3 = eval(sd3)
- dl.doratio = attr(eval.sd3, "gradient")
- rm(ysim, y1sim, y2sim)
- temp3 = dl.doratio
- run.var = ((ii - 1) * run.var + temp3^2) / ii
+ plackett <- function(link = "loge", ioratio = NULL,
+ imethod = 1, nsimEIM = 200) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(ioratio) && (!is.Numeric(ioratio, positive = TRUE)))
+ stop("'ioratio' must be positive")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Plackett distribution\n",
+ "Links: ",
+ namesof("oratio", link, earg = earg )),
+ initialize = eval(substitute(expression({
+ if (any(y < 0) || any(y > 1))
+ stop("the response must have values in the unit square")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ namesof("oratio", .link , earg = .earg, short = TRUE)
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ if (!length(etastart)) {
+ orinit = if (length( .ioratio )) .ioratio else {
+ if ( .imethod == 2) {
+ scorp = cor(y)[1, 2]
+ if (abs(scorp) <= 0.1) 1 else
+ if (abs(scorp) <= 0.3) 3^sign(scorp) else
+ if (abs(scorp) <= 0.6) 5^sign(scorp) else
+ if (abs(scorp) <= 0.8) 20^sign(scorp) else 40^sign(scorp)
+ } else {
+ y10 = weighted.mean(y[, 1], w)
+ y20 = weighted.mean(y[, 2], w)
+ (0.5 + sum(w[(y[, 1] < y10) & (y[, 2] < y20)])) *
+ (0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] >= y20)])) / (
+ ((0.5 + sum(w[(y[, 1] < y10) & (y[, 2] >= y20)])) *
+ (0.5 + sum(w[(y[, 1] >= y10) & (y[, 2] < y20)]))))
+ }
}
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow = TRUE) else cbind(run.var)
+ etastart = theta2eta(rep(orinit, length.out = n),
+ .link , earg = .earg )
+ }
+ }), list( .ioratio=ioratio, .link = link, .earg = earg,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ oratio = eta2theta(eta, .link , earg = .earg )
+ fv.matrix = matrix(0.5, length(oratio), 2)
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(dimnames(eta)[[1]], extra$dimnamesy2)
+ fv.matrix
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(oratio = .link)
+
+ misc$earg = list(oratio = .earg)
+
+ misc$expected = FALSE
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list( .link = link, .earg = earg,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ oratio = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dplack(x1 = y[, 1], x2 = y[, 2],
+ oratio = oratio, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("plackett"),
+ deriv = eval(substitute(expression({
+ oratio = eta2theta(eta, .link , earg = .earg )
+ doratio.deta = dtheta.deta(oratio, .link , earg = .earg )
+ y1 = y[, 1]
+ y2 = y[, 2]
+ de3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
+ (y1+y2-2*y1*y2)) - 1.5 *
+ log((1 + (y1+y2)*(oratio - 1))^2 - 4 * oratio * (oratio - 1)*y1*y2)),
+ name = "oratio", hessian= FALSE)
+ eval.de3 = eval(de3)
+
+ dl.doratio = attr(eval.de3, "gradient")
+
+ c(w) * dl.doratio * doratio.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ sd3 = deriv3(~ (log(oratio) + log(1+(oratio - 1) *
+ (y1sim+y2sim-2*y1sim*y2sim)) - 1.5 *
+ log((1 + (y1sim+y2sim)*(oratio - 1))^2 -
+ 4 * oratio * (oratio - 1)*y1sim*y2sim)),
+ name = "oratio", hessian= FALSE)
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rplack(n, oratio=oratio)
+ y1sim = ysim[, 1]
+ y2sim = ysim[, 1]
+ eval.sd3 = eval(sd3)
+ dl.doratio = attr(eval.sd3, "gradient")
+ rm(ysim, y1sim, y2sim)
+ temp3 = dl.doratio
+ run.var = ((ii - 1) * run.var + temp3^2) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * doratio.deta^2
- c(w) * wz
- }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))))
+ wz = wz * doratio.deta^2
+ c(w) * wz
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))))
}
-damh = function(x1, x2, alpha, log = FALSE) {
- log.arg = log
- rm(log)
- if (!is.Numeric(x1)) stop("bad input for 'x1'")
- if (!is.Numeric(x2)) stop("bad input for 'x2'")
- if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
- if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
- L = max(length(x1), length(x2), length(alpha))
- alpha = rep(alpha, length.out = L)
- x1 = rep(x1, length.out = L)
- x2 = rep(x2, length.out = L)
- temp = 1-alpha*(1-x1)*(1-x2)
- if (log.arg) {
- ans = log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
- ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = log(0)
- } else {
- ans = (1-alpha+2*alpha*x1*x2/temp) / (temp^2)
- ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = 0
- }
- ans
+damh <- function(x1, x2, alpha, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ if (!is.Numeric(x1)) stop("bad input for 'x1'")
+ if (!is.Numeric(x2)) stop("bad input for 'x2'")
+ if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
+ if (any(abs(alpha) > 1)) stop("'alpha' values out of range")
+ L = max(length(x1), length(x2), length(alpha))
+ alpha = rep(alpha, length.out = L)
+ x1 = rep(x1, length.out = L)
+ x2 = rep(x2, length.out = L)
+ temp = 1-alpha*(1-x1)*(1-x2)
+ if (log.arg) {
+ ans = log1p(-alpha+2*alpha*x1*x2/temp) - 2*log(temp)
+ ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = log(0)
+ } else {
+ ans = (1-alpha+2*alpha*x1*x2/temp) / (temp^2)
+ ans[(x1 <= 0) | (x1 >= 1) | (x2 <= 0) | (x2 >= 1)] = 0
+ }
+ ans
}
-pamh = function(q1, q2, alpha) {
+
+pamh <- function(q1, q2, alpha) {
if (!is.Numeric(q1)) stop("bad input for 'q1'")
if (!is.Numeric(q2)) stop("bad input for 'q2'")
if (!is.Numeric(alpha)) stop("bad input for 'alpha'")
@@ -1415,7 +1706,7 @@ pamh = function(q1, q2, alpha) {
ans
}
-ramh = function(n, alpha) {
+ramh <- function(n, alpha) {
if (!is.Numeric(n, positive = TRUE, allowable.length = 1,
integer.valued = TRUE))
stop("bad input for 'n'")
@@ -1430,118 +1721,148 @@ ramh = function(n, alpha) {
A = -alpha*(2*b*V2+1)+2*alpha^2*b^2*V2+1
B = alpha^2*(4*b^2*V2-4*b*V2+1)+alpha*(4*V2-4*b*V2-2)+1
U2 = (2*V2*(alpha*b - 1)^2)/(A+sqrt(B))
- matrix(c(U1,U2), nrow=n, ncol = 2)
+ matrix(c(U1,U2), nrow = n, ncol = 2)
}
-amh.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight=save.weight)
+amh.control <- function(save.weight = TRUE, ...) {
+ list(save.weight = save.weight)
}
- amh = function(lalpha = "rhobit", ealpha = list(), ialpha = NULL,
- imethod = 1, nsimEIM = 250)
+ amh <- function(lalpha = "rhobit", ialpha = NULL,
+ imethod = 1, nsimEIM = 250)
{
- if (mode(lalpha) != "character" && mode(lalpha) != "name")
- lalpha = as.character(substitute(lalpha))
- if (!is.list(ealpha)) ealpha = list()
+ lalpha <- as.list(substitute(lalpha))
+ ealpha <- link2list(lalpha)
+ lalpha <- attr(ealpha, "function.name")
- if (length(ialpha) && (abs(ialpha) > 1))
- stop("'ialpha' should be less than or equal to 1 in absolute value")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("imethod must be 1 or 2")
- if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 50))
- stop("'nsimEIM' should be an integer greater than 50")
- new("vglmff",
- blurb = c("Ali-Mikhail-Haq distribution\n",
- "Links: ",
- namesof("alpha", lalpha, earg = ealpha )),
- initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- if (any(y < 0) || any(y > 1))
- stop("the response must have values in the unit square")
- predictors.names=c(namesof("alpha", .lalpha, earg = .ealpha, short = TRUE))
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
- if (!length(etastart)) {
- ainit = if (length( .ialpha )) .ialpha else {
- mean1 = if ( .imethod == 1) weighted.mean(y[, 1],w) else
- median(y[, 1])
- mean2 = if ( .imethod == 1) weighted.mean(y[, 2],w) else
- median(y[, 2])
- Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
- (1 - (mean1 * mean2 / Finit)) / ((1-mean1) * (1-mean2))
- }
- ainit = min(0.95, max(ainit, -0.95))
- etastart = theta2eta(rep(ainit, length.out = n), .lalpha, earg = .ealpha )
- }
- }), list( .lalpha = lalpha, .ealpha = ealpha, .ialpha=ialpha,
- .imethod = imethod))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
- fv.matrix = matrix(0.5, length(alpha), 2)
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
- fv.matrix
- }, list(.lalpha = lalpha, .ealpha = ealpha ))),
- last = eval(substitute(expression({
- misc$link = c("alpha"= .lalpha)
- misc$earg = list("alpha"= .ealpha )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list(.lalpha = lalpha, .ealpha = ealpha, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * damh(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
- }
- }, list( .lalpha = lalpha, .earg = ealpha ))),
- vfamily = c("amh"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lalpha, earg = .ealpha )
- dalpha.deta = dtheta.deta(alpha, .lalpha, earg = .ealpha )
- y1 = y[, 1]
- y2 = y[, 2]
- de3 = deriv3(~ (log(1-alpha+(2*alpha*y1*y2/(1-alpha*(1-y1)*(1-y2))))-
- 2*log(1-alpha*(1-y1)*(1-y2))) ,
- name = "alpha", hessian= FALSE)
- eval.de3 = eval(de3)
- dl.dalpha = attr(eval.de3, "gradient")
- w * dl.dalpha * dalpha.deta
- }), list(.lalpha = lalpha, .ealpha = ealpha ))),
- weight = eval(substitute(expression({
- sd3 = deriv3(~ (log(1-alpha+
- (2*alpha*y1sim*y2sim/(1-alpha*(1-y1sim)*(1-y2sim))))-
- 2*log(1-alpha*(1-y1sim)*(1-y2sim))) ,
- name = "alpha", hessian= FALSE)
- run.var = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = ramh(n, alpha=alpha)
- y1sim = ysim[, 1]
- y2sim = ysim[, 1]
- eval.sd3 = eval(sd3)
- dl.alpha = attr(eval.sd3, "gradient")
- rm(ysim, y1sim, y2sim)
- temp3 = dl.dalpha
- run.var = ((ii - 1) * run.var + temp3^2) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow = TRUE) else cbind(run.var)
- wz = wz * dalpha.deta^2
- c(w) * wz
- }), list( .lalpha = lalpha, .ealpha = ealpha, .nsimEIM = nsimEIM ))))
+ if (length(ialpha) && (abs(ialpha) > 1))
+ stop("'ialpha' should be less than or equal to 1 in absolute value")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("imethod must be 1 or 2")
+
+ if (length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("'nsimEIM' should be an integer greater than 50")
+
+
+ new("vglmff",
+ blurb = c("Ali-Mikhail-Haq distribution\n",
+ "Links: ",
+ namesof("alpha", lalpha, earg = ealpha )),
+ initialize = eval(substitute(expression({
+ if (any(y < 0) || any(y > 1))
+ stop("the response must have values in the unit square")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ c(namesof("alpha", .lalpha, earg = .ealpha, short = TRUE))
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ if (!length(etastart)) {
+ ainit = if (length( .ialpha )) .ialpha else {
+ mean1 = if ( .imethod == 1) weighted.mean(y[, 1], w) else
+ median(y[, 1])
+ mean2 = if ( .imethod == 1) weighted.mean(y[, 2], w) else
+ median(y[, 2])
+ Finit = weighted.mean(y[, 1] <= mean1 & y[, 2] <= mean2, w)
+ (1 - (mean1 * mean2 / Finit)) / ((1-mean1) * (1-mean2))
+ }
+ ainit = min(0.95, max(ainit, -0.95))
+ etastart =
+ theta2eta(rep(ainit, length.out = n), .lalpha, earg = .ealpha )
+ }
+ }), list( .lalpha = lalpha, .ealpha = ealpha, .ialpha = ialpha,
+ .imethod = imethod))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .lalpha, earg = .ealpha )
+ fv.matrix = matrix(0.5, length(alpha), 2)
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(names(eta), extra$dimnamesy2)
+ fv.matrix
+ }, list( .lalpha = lalpha, .ealpha = ealpha ))),
+ last = eval(substitute(expression({
+ misc$link = c("alpha" = .lalpha )
+
+ misc$earg = list("alpha" = .ealpha )
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list( .lalpha = lalpha,
+ .ealpha = ealpha, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .lalpha, earg = .ealpha )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * damh(x1=y[, 1], x2=y[, 2], alpha=alpha, log = TRUE))
+ }
+ }, list( .lalpha = lalpha, .ealpha = ealpha ))),
+ vfamily = c("amh"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .lalpha, earg = .ealpha )
+
+ dalpha.deta = dtheta.deta(alpha, .lalpha, earg = .ealpha )
+
+ y1 = y[, 1]
+ y2 = y[, 2]
+ de3 = deriv3(~ (log(1-alpha+(2*alpha*y1*y2/(1-alpha*(1-y1)*(1-y2))))-
+ 2*log(1-alpha*(1-y1)*(1-y2))) ,
+ name = "alpha", hessian= FALSE)
+ eval.de3 = eval(de3)
+
+ dl.dalpha = attr(eval.de3, "gradient")
+
+ c(w) * dl.dalpha * dalpha.deta
+ }), list( .lalpha = lalpha, .ealpha = ealpha ))),
+ weight = eval(substitute(expression({
+ sd3 = deriv3(~ (log(1-alpha+
+ (2*alpha*y1sim*y2sim/(1-alpha*(1-y1sim)*(1-y2sim)))) -
+ 2*log(1-alpha*(1-y1sim)*(1-y2sim))) ,
+ name = "alpha", hessian= FALSE)
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = ramh(n, alpha=alpha)
+ y1sim = ysim[, 1]
+ y2sim = ysim[, 1]
+ eval.sd3 = eval(sd3)
+ dl.alpha = attr(eval.sd3, "gradient")
+ rm(ysim, y1sim, y2sim)
+ temp3 = dl.dalpha
+ run.var = ((ii - 1) * run.var + temp3^2) / ii
+ }
+
+ wz = if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
+
+ wz = wz * dalpha.deta^2
+
+ c(w) * wz
+ }), list( .lalpha = lalpha,
+ .ealpha = ealpha, .nsimEIM = nsimEIM ))))
}
@@ -1558,11 +1879,14 @@ amh.control <- function(save.weight = TRUE, ...)
-dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
- rho = 0, log = FALSE) {
- log.arg = log
+dbinorm <- function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
+ rho = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
+
temp5 = 1 - rho^2
zedd1 = (x1 - mean1) / sd1
zedd2 = (x2 - mean2) / sd2
@@ -1575,37 +1899,44 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
- binormal = function(lmean1 = "identity", emean1 = list(),
- lmean2 = "identity", emean2 = list(),
- lsd1 = "loge", esd1 = list(),
- lsd2 = "loge", esd2 = list(),
- lrho = "rhobit", erho = list(),
- imean1 = NULL, imean2 = NULL,
- isd1 = NULL, isd2 = NULL,
- irho = NULL, imethod = 1,
- equalmean = FALSE, equalsd = FALSE,
- zero = 3:5) {
- if (mode(lmean1) != "character" && mode(lmean1) != "name")
- lmean1 = as.character(substitute(lmean1))
- if (mode(lmean2) != "character" && mode(lmean2) != "name")
- lmean2 = as.character(substitute(lmean2))
- if (mode(lsd1 ) != "character" && mode(lsd1 ) != "name")
- lsd1 = as.character(substitute(lsd1 ))
- if (mode(lsd2 ) != "character" && mode(lsd2 ) != "name")
- lsd2 = as.character(substitute(lsd2 ))
- if (mode(lrho ) != "character" && mode(lrho ) != "name")
- lrho = as.character(substitute(lrho ))
-
- if (!is.list(emean1)) emean1 = list()
- if (!is.list(emean2)) emean2 = list()
- if (!is.list(esd1 )) esd1 = list()
- if (!is.list(esd2 )) esd2 = list()
- if (!is.list(erho )) erho = list()
-
- trivial1 = is.logical(equalmean) && length(equalmean) == 1 && !equalmean
- trivial2 = is.logical(equalsd ) && length(equalsd ) == 1 && !equalsd
+ binormal <- function(lmean1 = "identity",
+ lmean2 = "identity",
+ lsd1 = "loge",
+ lsd2 = "loge",
+ lrho = "rhobit",
+ imean1 = NULL, imean2 = NULL,
+ isd1 = NULL, isd2 = NULL,
+ irho = NULL, imethod = 1,
+ eq.mean = FALSE, eq.sd = FALSE,
+ zero = 3:5) {
+
+ lmean1 <- as.list(substitute(lmean1))
+ emean1 <- link2list(lmean1)
+ lmean1 <- attr(emean1, "function.name")
+
+ lmean2 <- as.list(substitute(lmean2))
+ emean2 <- link2list(lmean2)
+ lmean2 <- attr(emean2, "function.name")
+
+ lsd1 <- as.list(substitute(lsd1))
+ esd1 <- link2list(lsd1)
+ lsd1 <- attr(esd1, "function.name")
+
+ lsd2 <- as.list(substitute(lsd2))
+ esd2 <- link2list(lsd2)
+ lsd2 <- attr(esd2, "function.name")
+
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
+
+
+
+
+ trivial1 = is.logical(eq.mean) && length(eq.mean) == 1 && !eq.mean
+ trivial2 = is.logical(eq.sd ) && length(eq.sd ) == 1 && !eq.sd
if(!trivial1 && !trivial2)
- stop("only one of 'equalmean' and 'equalsd' can be assigned a value")
+ stop("only one of 'eq.mean' and 'eq.sd' can be assigned a value")
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -1624,19 +1955,30 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
temp8.m[2, 1] <- 1
temp8.s <- diag(5)[, -4]
temp8.s[4, 3] <- 1
- constraints <- cm.vgam(temp8.m, x, .equalmean,
+ constraints <- cm.vgam(temp8.m, x, .eq.mean,
constraints, intercept.apply = TRUE)
- constraints <- cm.vgam(temp8.s, x, .equalsd,
+ constraints <- cm.vgam(temp8.s, x, .eq.sd,
constraints, intercept.apply = TRUE)
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero,
- .equalsd = equalsd,
- .equalmean = equalmean ))),
+ .eq.sd = eq.sd,
+ .eq.mean = eq.mean ))),
initialize = eval(substitute(expression({
- if (!is.matrix(y) || ncol(y) != 2)
- stop("the response must be a 2 column matrix")
- predictors.names = c(
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ ncol.y.min = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <- c(
namesof("mean1", .lmean1, earg = .emean1, short = TRUE),
namesof("mean2", .lmean2, earg = .emean2, short = TRUE),
namesof("sd1", .lsd1, earg = .esd1, short = TRUE),
@@ -1651,8 +1993,10 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
weighted.mean(y[, 1], w = w), length.out = n)
imean2 = rep(if (length( .imean2 )) .imean2 else
weighted.mean(y[, 2], w = w), length.out = n)
- isd1 = rep(if (length( .isd1 )) .isd1 else sd(y[, 1]), length.out = n)
- isd2 = rep(if (length( .isd2 )) .isd2 else sd(y[, 2]), length.out = n)
+ isd1 = rep(if (length( .isd1 )) .isd1 else sd(y[, 1]),
+ length.out = n)
+ isd2 = rep(if (length( .isd2 )) .isd2 else sd(y[, 2]),
+ length.out = n)
irho = rep(if (length( .irho )) .irho else cor(y[, 1], y[, 2]),
length.out = n)
@@ -1692,12 +2036,15 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
"sd1" = .lsd1,
"sd2" = .lsd2,
"rho" = .lrho)
+
misc$earg = list("mean1" = .emean1,
"mean2" = .emean2,
"sd1" = .esd1,
"sd2" = .esd2,
"rho" = .erho)
+
misc$expected = TRUE
+ misc$multipleResponses <- FALSE
}) , list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
.lsd1 = lsd1 , .lsd2 = lsd2 , .lrho = lrho,
@@ -1712,9 +2059,9 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dbinorm(x1 = y[, 1], x2 = y[, 2],
- mean1 = mean1, mean2 = mean2,
- sd1 = sd1, sd2 = sd2, rho = Rho, log = TRUE))
+ sum(c(w) * dbinorm(x1 = y[, 1], x2 = y[, 2],
+ mean1 = mean1, mean2 = mean2,
+ sd1 = sd1, sd2 = sd2, rho = Rho, log = TRUE))
}
} , list( .lmean1 = lmean1, .lmean2 = lmean2,
.emean1 = emean1, .emean2 = emean2,
@@ -1794,3 +2141,112 @@ dbinorm = function(x1, x2, mean1 = 0, mean2 = 0, sd1 = 1, sd2 = 1,
}
+
+
+
+
+
+gumbelI <-
+ function(la = "identity", earg = list(), ia = NULL, imethod = 1) {
+
+ la <- as.list(substitute(la))
+ earg <- link2list(la)
+ la <- attr(earg, "function.name")
+
+
+
+ if (length(ia) && !is.Numeric(ia, allowable.length = 1))
+ stop("'ia' must be a single number")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb=c("Gumbel's Type I Bivariate Distribution\n",
+ "Links: ",
+ namesof("a", la, earg = earg )),
+ initialize=eval(substitute(expression({
+ if (!is.matrix(y) || ncol(y) != 2)
+ stop("the response must be a 2 column matrix")
+
+ if (any(y < 0))
+ stop("the response must have non-negative values only")
+
+ predictors.names = c(namesof("a", .la, earg = .earg , short = TRUE))
+ if (!length(etastart)) {
+ ainit = if (length( .ia )) rep( .ia, len = n) else {
+ mean1 = if ( .imethod == 1) median(y[,1]) else mean(y[,1])
+ mean2 = if ( .imethod == 1) median(y[,2]) else mean(y[,2])
+ Finit = 0.01 + mean(y[,1] <= mean1 & y[,2] <= mean2)
+ (log(Finit+expm1(-mean1)+exp(-mean2))+mean1+mean2)/(mean1*mean2)
+ }
+ etastart = theta2eta(rep(ainit, len = n), .la, earg = .earg )
+ }
+ }), list( .ia=ia, .la=la, .earg = earg, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .la, earg = .earg )
+ cbind(rep(1, len = length(alpha)),
+ rep(1, len = length(alpha)))
+ }, list( .la=la ))),
+ last = eval(substitute(expression({
+ misc$link = c("a" = .la)
+ misc$earg = list("a" = .earg)
+ misc$expected = FALSE
+ misc$pooled.weight = pooled.weight
+ }), list( .la=la, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .la, earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ denom = (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
+ mytolerance = .Machine$double.xmin
+ bad <- (denom <= mytolerance) # Range violation
+ if (any(bad)) {
+ cat("There are some range violations in @deriv\n")
+ flush.console()
+ denom[bad] = 2 * mytolerance
+ }
+ sum(w * (-y[,1] - y[,2] + alpha*y[,1]*y[,2] + log(denom)))
+ }
+ }, list( .la=la, .earg = earg ))),
+ vfamily=c("gumbelI"),
+ deriv=eval(substitute(expression({
+ alpha = eta2theta(eta, .la, earg = .earg )
+ numerator = (alpha*y[,1] - 1)*y[,2] + (alpha*y[,2] - 1)*y[,1] + 1
+ denom = (alpha*y[,1] - 1) * (alpha*y[,2] - 1) + alpha
+ denom = abs(denom)
+ dl.dalpha = numerator / denom + y[,1]*y[,2]
+ dalpha.deta = dtheta.deta(alpha, .la, earg = .earg )
+ c(w) * cbind(dl.dalpha * dalpha.deta)
+ }), list( .la=la, .earg = earg ))),
+ weight=eval(substitute(expression({
+ d2l.dalpha2 = (numerator/denom)^2 - 2*y[,1]*y[,2] / denom
+ d2alpha.deta2 = d2theta.deta2(alpha, .la, earg = .earg )
+ wz = w * (dalpha.deta^2 * d2l.dalpha2 - d2alpha.deta2 * dl.dalpha)
+ if (TRUE &&
+ intercept.only) {
+ wz = cbind(wz)
+ sumw = sum(w)
+ for(iii in 1:ncol(wz))
+ wz[,iii] = sum(wz[,iii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+ wz
+ }), list( .la=la, .earg = earg ))))
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/R/family.categorical.R b/R/family.categorical.R
index 5476c28..6fbf55d 100644
--- a/R/family.categorical.R
+++ b/R/family.categorical.R
@@ -131,12 +131,14 @@ Deviance.categorical.data.vgam <-
-dmultinomial = function(x, size = NULL, prob, log = FALSE,
+dmultinomial <- function(x, size = NULL, prob, log = FALSE,
dochecking = TRUE, smallno = 1.0e-7) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
+
x = as.matrix(x)
prob = as.matrix(prob)
if (((K <- ncol(x)) <= 1) ||
@@ -172,13 +174,15 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
- sratio = function(link = "logit", earg = list(),
+ sratio <- function(link = "logit",
parallel = FALSE, reverse = FALSE, zero = NULL,
whitespace = FALSE)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
@@ -188,7 +192,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
new("vglmff",
- blurb = c("Stopping Ratio model\n\n",
+ blurb = c("Stopping ratio model\n\n",
"Links: ",
namesof(if (reverse)
ifelse(whitespace, "P[Y = j+1|Y <= j+1]", "P[Y=j+1|Y<=j+1]") else
@@ -205,18 +209,24 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+
+ if (is.factor(y) && !is.ordered(y))
+ warning("response should be ordinal---see ordered()")
+
+
+
delete.zero.colns = TRUE
eval(process.categorical.data.vgam)
extra$wy.prod = TRUE
M = ncol(y) - 1
- mynames = if ( .reverse)
+ mynames = if ( .reverse )
paste("P[Y", .fillerChar, "=", .fillerChar, 2:(M+1), "|Y",
.fillerChar, "<=", .fillerChar, 2:(M+1), "]", sep = "") else
paste("P[Y", .fillerChar, "=", .fillerChar, 1:M, "|Y",
.fillerChar, ">=", .fillerChar, 1:M, "]", sep = "")
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
y.names = paste("mu", 1:(M+1), sep = "")
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
@@ -298,7 +308,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
deriv = eval(substitute(expression({
if (!length(extra$mymat)) {
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
- tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
}
if ( .reverse ) {
djr = eta2theta(eta, .link , earg = .earg )
@@ -331,13 +341,15 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
- cratio = function(link = "logit", earg = list(),
- parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
+ cratio <- function(link = "logit",
+ parallel = FALSE, reverse = FALSE, zero = NULL,
+ whitespace = FALSE)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
@@ -347,7 +359,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
new("vglmff",
- blurb = c("Continuation Ratio model\n\n",
+ blurb = c("Continuation ratio model\n\n",
"Links: ",
namesof(if (reverse)
ifelse(whitespace, "P[Y < j+1|Y <= j+1]", "P[Y<j+1|Y<=j+1]") else
@@ -366,6 +378,12 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+
+ if (is.factor(y) && !is.ordered(y))
+ warning("response should be ordinal---see ordered()")
+
+
+
delete.zero.colns = TRUE
eval(process.categorical.data.vgam)
M = ncol(y) - 1
@@ -375,8 +393,8 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
.fillerChar, "<=", .fillerChar, 2:(M+1), "]", sep = "") else
paste("P[Y", .fillerChar, ">", .fillerChar, 1:M, "|Y",
.fillerChar, ">=", .fillerChar, 1:M, "]", sep = "")
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
+ predictors.names <-
+ namesof(mynames, .link , earg = .earg , short = TRUE)
y.names = paste("mu", 1:(M+1), sep = "")
extra$mymat = if ( .reverse )
@@ -392,8 +410,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
linkinv = eval(substitute( function(eta, extra = NULL) {
if (!is.matrix(eta))
eta = as.matrix(eta)
- fv.matrix =
- if ( .reverse ) {
+ fv.matrix = if ( .reverse ) {
M = ncol(eta)
djrs = eta2theta(eta, .link , earg = .earg )
temp = tapplymat1(djrs[, M:1], "cumprod")[, M:1]
@@ -409,12 +426,14 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
fv.matrix
}, list( .earg = earg, .link = link, .reverse = reverse) )),
last = eval(substitute(expression({
- misc$link = rep( .link , length = M)
+ misc$link = rep( .link , length = M)
names(misc$link) = mynames
+
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
- for (ii in 1:M) misc$earg[[ii]] = .earg
+ for (ii in 1:M)
+ misc$earg[[ii]] = .earg
misc$parameters = mynames
misc$reverse = .reverse
@@ -462,7 +481,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
deriv = eval(substitute(expression({
if (!length(extra$mymat)) {
extra$mymat = if ( .reverse ) tapplymat1(y, "cumsum") else
- tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
}
if ( .reverse ) {
djrs = eta2theta(eta, .link , earg = .earg )
@@ -496,7 +515,7 @@ dmultinomial = function(x, size = NULL, prob, log = FALSE,
-vglm.multinomial.deviance.control = function(maxit = 21, panic = FALSE, ...)
+vglm.multinomial.deviance.control <- function(maxit = 21, panic = FALSE, ...)
{
if (maxit < 1) {
warning("bad value of maxit; using 21 instead")
@@ -505,7 +524,8 @@ vglm.multinomial.deviance.control = function(maxit = 21, panic = FALSE, ...)
list(maxit=maxit, panic = as.logical(panic)[1])
}
-vglm.multinomial.control = function(maxit = 21, panic = FALSE,
+
+vglm.multinomial.control <- function(maxit = 21, panic = FALSE,
criterion = c("aic1", "aic2", names( .min.criterion.VGAM )), ...)
{
if (mode(criterion) != "character" && mode(criterion) != "name")
@@ -524,7 +544,7 @@ vglm.multinomial.control = function(maxit = 21, panic = FALSE,
}
-vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
+vglm.vcategorical.control <- function(maxit = 30, trace = FALSE,
panic = TRUE, ...)
{
if (maxit < 1) {
@@ -541,9 +561,9 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
- multinomial = function(zero = NULL, parallel = FALSE, nointercept = NULL,
- refLevel = "last",
- whitespace = FALSE)
+ multinomial <- function(zero = NULL, parallel = FALSE,
+ nointercept = NULL, refLevel = "last",
+ whitespace = FALSE)
{
if (length(refLevel) != 1)
stop("the length of 'refLevel' must be one")
@@ -551,13 +571,13 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
if (is.character(refLevel)) {
if (refLevel != "last")
stop('if a character, refLevel must be "last"')
- refLevel = -1
+ refLevel <- -1
} else
if (is.factor(refLevel)) {
if (is.ordered(refLevel))
warning("'refLevel' is from an ordered factor")
- refLevel = as.character(refLevel) == levels(refLevel)
- refLevel = (1:length(refLevel))[refLevel]
+ refLevel <- as.character(refLevel) == levels(refLevel)
+ refLevel <- (1:length(refLevel))[refLevel]
if (!is.Numeric(refLevel, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
stop("could not coerce 'refLevel' into a single positive integer")
@@ -575,10 +595,11 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
new("vglmff",
blurb = c("Multinomial logit model\n\n",
"Links: ",
- if (refLevel < 0)
+ if (refLevel < 0) {
ifelse(whitespace,
"log(mu[,j] / mu[,M+1]), j = 1:M,\n",
- "log(mu[,j]/mu[,M+1]), j=1:M,\n") else {
+ "log(mu[,j]/mu[,M+1]), j=1:M,\n")
+ } else {
if (refLevel == 1) {
paste("log(mu[,", "j]", fillerChar, "/", fillerChar,
"mu[,", refLevel, "]), j",
@@ -603,30 +624,42 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
intercept.apply = FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- constraints = cm.nointercept.vgam(constraints, x, .nointercept, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.nointercept.vgam(constraints, x, .nointercept, M)
}), list( .parallel = parallel, .zero = zero,
.nointercept = nointercept,
.refLevel = refLevel ))),
+
deviance = Deviance.categorical.data.vgam,
+ infos = eval(substitute(function(...) {
+ list(parallel = .parallel ,
+ refLevel = .refLevel ,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .zero = zero,
+ .refLevel = refLevel,
+ .parallel = parallel
+ ))),
+
initialize = eval(substitute(expression({
delete.zero.colns = TRUE
eval(process.categorical.data.vgam)
- M = ncol(y)-1
- use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
+ M <- ncol(y)-1
+ use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
if (use.refLevel > (M+1))
stop("argument 'refLevel' has a value that is too high")
- allbut.refLevel = (1:(M+1))[-use.refLevel]
- predictors.names =
+ allbut.refLevel <- (1:(M+1))[-use.refLevel]
+ predictors.names <-
paste("log(mu[,", allbut.refLevel,
"]", .fillerChar, "/", .fillerChar, "mu[,",
use.refLevel, "])", sep = "")
- y.names = paste("mu", 1:(M+1), sep = "")
+
+ y.names <- paste("mu", 1:(M+1), sep = "")
}), list( .refLevel = refLevel,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
@@ -634,41 +667,53 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
linkinv = eval(substitute( function(eta, extra = NULL) {
if (any(is.na(eta)))
warning("there are NAs in eta in slot inverse")
- M = ncol(cbind(eta))
+ M <- ncol(cbind(eta))
if ( (.refLevel < 0) || (.refLevel == M+1)) {
- phat = cbind(exp(eta), 1)
+ phat <- cbind(exp(eta), 1)
} else if ( .refLevel == 1) {
- phat = cbind(1, exp(eta))
+ phat <- cbind(1, exp(eta))
} else {
- use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
- etamat = cbind(eta[, 1:( .refLevel - 1)], 0,
+ use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
+ etamat <- cbind(eta[, 1:( .refLevel - 1)], 0,
eta[, ( .refLevel ):M])
- phat = exp(etamat)
+ phat <- exp(etamat)
}
- ans = phat / as.vector(phat %*% rep(1, ncol(phat)))
+ ans <- phat / as.vector(phat %*% rep(1, ncol(phat)))
if (any(is.na(ans)))
warning("there are NAs here in slot inverse")
ans
}), list( .refLevel = refLevel )),
last = eval(substitute(expression({
- misc$refLevel = if ( .refLevel < 0) M+1 else .refLevel
- misc$link = "mlogit"
- misc$earg = list(mlogit = list()) # vector("list", M)
+ misc$refLevel <- if ( .refLevel < 0) M+1 else .refLevel
+ misc$link <- "mlogit"
- dy = dimnames(y)
+ misc$earg <- list(mlogit = list(
+ nointercept = .nointercept,
+ parallel = .parallel ,
+ refLevel = .refLevel ,
+ zero = .zero ))
+
+ dy <- dimnames(y)
if (!is.null(dy[[2]]))
- dimnames(fit$fitted.values) = dy
+ dimnames(fit$fitted.values) <- dy
- misc$nointercept = .nointercept
+ misc$multipleResponses <- FALSE
+ misc$nointercept <- .nointercept
+ misc$parallel <- .parallel
+ misc$refLevel <- .refLevel
+ misc$zero <- .zero
}), list( .refLevel = refLevel,
- .nointercept = nointercept ))),
+ .nointercept = nointercept,
+ .parallel = parallel,
+ .zero = zero
+ ))),
linkfun = eval(substitute( function(mu, extra = NULL) {
if ( .refLevel < 0) {
- log(mu[, -ncol(mu)] / mu[,ncol(mu)])
+ log(mu[, -ncol(mu)] / mu[, ncol(mu)])
} else {
- use.refLevel = if ( .refLevel < 0) ncol(mu) else .refLevel
+ use.refLevel <- if ( .refLevel < 0) ncol(mu) else .refLevel
log(mu[, -( use.refLevel )] / mu[, use.refLevel ])
}
}), list( .refLevel = refLevel )),
@@ -677,15 +722,15 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ ycounts <- if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ nvec <- if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
- smallno = 1.0e4 * .Machine$double.eps
+ smallno <- 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
+ ycounts <- round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
dmultinomial(x = ycounts, size = nvec, prob = mu,
@@ -696,35 +741,35 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
if ( .refLevel < 0) {
c(w) * (y[, -ncol(y)] - mu[, -ncol(y)])
} else {
- use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
+ use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
c(w) * (y[, -use.refLevel] - mu[, -use.refLevel])
}
}), list( .refLevel = refLevel ))),
weight = eval(substitute(expression({
- mytiny = (mu < sqrt(.Machine$double.eps)) |
+ mytiny <- (mu < sqrt(.Machine$double.eps)) |
(mu > 1.0 - sqrt(.Machine$double.eps))
- use.refLevel = if ( .refLevel < 0) M+1 else .refLevel
+ use.refLevel <- if ( .refLevel < 0) M+1 else .refLevel
if (M == 1) {
- wz = mu[, 3-use.refLevel] * (1-mu[, 3-use.refLevel])
+ wz <- mu[, 3-use.refLevel] * (1-mu[, 3-use.refLevel])
} else {
- index = iam(NA, NA, M, both = TRUE, diag = TRUE)
- myinc = (index$row.index >= use.refLevel)
- index$row.index[myinc] = index$row.index[myinc] + 1
- myinc = (index$col.index >= use.refLevel)
- index$col.index[myinc] = index$col.index[myinc] + 1
-
- wz = -mu[,index$row] * mu[,index$col]
- wz[, 1:M] = wz[, 1:M] + mu[, -use.refLevel ]
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ myinc <- (index$row.index >= use.refLevel)
+ index$row.index[myinc] <- index$row.index[myinc] + 1
+ myinc <- (index$col.index >= use.refLevel)
+ index$col.index[myinc] <- index$col.index[myinc] + 1
+
+ wz <- -mu[,index$row] * mu[,index$col]
+ wz[, 1:M] <- wz[, 1:M] + mu[, -use.refLevel ]
}
- atiny = (mytiny %*% rep(1, ncol(mu))) > 0 # apply(mytiny, 1, any)
+ atiny <- (mytiny %*% rep(1, ncol(mu))) > 0 # apply(mytiny, 1, any)
if (any(atiny)) {
- if (M == 1) wz[atiny] = wz[atiny] *
+ if (M == 1) wz[atiny] <- wz[atiny] *
(1 + .Machine$double.eps^0.5) +
.Machine$double.eps else
- wz[atiny, 1:M] = wz[atiny, 1:M] * (1 + .Machine$double.eps^0.5) +
+ wz[atiny, 1:M] <- wz[atiny, 1:M] * (1 + .Machine$double.eps^0.5) +
.Machine$double.eps
}
c(w) * wz
@@ -735,14 +780,19 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
- cumulative = function(link = "logit", earg = list(),
- parallel = FALSE, reverse = FALSE,
- mv = FALSE,
- intercept.apply = FALSE,
- whitespace = FALSE)
+ cumulative <- function(link = "logit",
+ parallel = FALSE, reverse = FALSE,
+ mv = FALSE,
+ intercept.apply = FALSE,
+ whitespace = FALSE)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
stopifnot(is.logical(whitespace) &&
length(whitespace) == 1)
@@ -751,14 +801,13 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
if (!is.logical(mv) || length(mv) != 1)
stop("argument 'mv' must be a single logical")
- if (!is.list(earg))
- earg = list()
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
new("vglmff",
- blurb = if ( mv ) c(paste("Multivariate cumulative", link, "model\n\n"),
+ blurb = if ( mv )
+ c(paste("Multivariate cumulative", link, "model\n\n"),
"Links: ",
namesof(if (reverse)
ifelse(whitespace, "P[Y1 >= j+1]", "P[Y1>=j+1]") else
@@ -779,7 +828,7 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
Hk.matrix = kronecker(diag(NOS), matrix(1,Llevels-1,1))
constraints = cm.vgam(Hk.matrix, x, .parallel, constraints,
intercept.apply = .intercept.apply)
- }
+ }
} else {
constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints,
intercept.apply = .intercept.apply)
@@ -789,7 +838,7 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- answer =
+ answer <-
if ( .mv ) {
totdev = 0
NOS = extra$NOS
@@ -815,65 +864,75 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
initialize = eval(substitute(expression({
- if (colnames(x)[1] != "(Intercept)")
- stop("there is no intercept term!")
+ if (colnames(x)[1] != "(Intercept)")
+ warning("there seems to be no intercept term!")
- extra$mv = .mv
- if ( .mv ) {
- checkCut(y) # Check the input; stops if there is an error.
- if (any(w != 1) || ncol(cbind(w)) != 1)
- stop("the 'weights' argument must be a vector of all ones")
- Llevels = max(y)
- delete.zero.colns = FALSE
- orig.y = cbind(y) # Convert y into a matrix if necessary
- NOS = ncol(cbind(orig.y))
- use.y = use.mustart = NULL
- for (iii in 1:NOS) {
- y = as.factor(orig.y[,iii])
- eval(process.categorical.data.vgam)
- use.y = cbind(use.y, y)
- use.mustart = cbind(use.mustart, mustart)
- }
- mustart = use.mustart
- y = use.y # n x (Llevels*NOS)
- M = NOS * (Llevels-1)
- mynames = y.names = NULL
- for (iii in 1:NOS) {
- Y.names = paste("Y", iii, sep = "")
- mu.names = paste("mu", iii, ".", sep = "")
- mynames = c(mynames, if ( .reverse )
- paste("P[", Y.names, ">=", 2:Llevels, "]", sep = "") else
- paste("P[", Y.names, "<=", 1:(Llevels-1), "]", sep = ""))
- y.names = c(y.names, paste(mu.names, 1:Llevels, sep = ""))
- }
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
- extra$NOS = NOS
- extra$Llevels = Llevels
- } else {
- delete.zero.colns = TRUE
-
- eval(process.categorical.data.vgam)
- M = ncol(y)-1
- mynames = if ( .reverse )
- paste("P[Y", .fillerChar , ">=", .fillerChar,
- 2:(1+M), "]", sep = "") else
- paste("P[Y", .fillerChar , "<=", .fillerChar,
- 1:M, "]", sep = "")
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
- y.names = paste("mu", 1:(M+1), sep = "")
- if (ncol(cbind(w)) == 1) {
- if (length(mustart) && all(c(y) %in% c(0, 1)))
- for (iii in 1:ncol(y))
- mustart[,iii] = weighted.mean(y[,iii], w)
- }
+ if (is.factor(y) && !is.ordered(y))
+ warning("response should be ordinal---see ordered()")
+
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ extra$mv <- .mv
+ if ( .mv ) {
+ checkCut(y) # Check the input; stops if there is an error.
+ if (any(w != 1) || ncol(cbind(w)) != 1)
+ stop("the 'weights' argument must be a vector of all ones")
+ Llevels <- max(y)
+ delete.zero.colns <- FALSE
+ orig.y <- cbind(y) # Convert y into a matrix if necessary
+ NOS <- ncol(cbind(orig.y))
+ use.y <- use.mustart <- NULL
+ for (iii in 1:NOS) {
+ y <- as.factor(orig.y[,iii])
+ eval(process.categorical.data.vgam)
+ use.y <- cbind(use.y, y)
+ use.mustart <- cbind(use.mustart, mustart)
+ }
+ mustart <- use.mustart
+ y <- use.y # n x (Llevels*NOS)
+ M <- NOS * (Llevels-1)
+ mynames <- y.names <- NULL
+ for (iii in 1:NOS) {
+ Y.names <- paste("Y", iii, sep = "")
+ mu.names <- paste("mu", iii, ".", sep = "")
+ mynames <- c(mynames, if ( .reverse )
+ paste("P[", Y.names, ">=", 2:Llevels, "]", sep = "") else
+ paste("P[", Y.names, "<=", 1:(Llevels-1), "]", sep = ""))
+ y.names <- c(y.names, paste(mu.names, 1:Llevels, sep = ""))
}
- }), list( .link = link, .reverse = reverse, .mv = mv, .earg = earg,
+
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
+
+ extra$NOS <- NOS
+ extra$Llevels <- Llevels
+ } else {
+
+ delete.zero.colns <- TRUE
+
+ eval(process.categorical.data.vgam)
+ M <- ncol(y) - 1
+ mynames <- if ( .reverse )
+ paste("P[Y", .fillerChar , ">=", .fillerChar,
+ 2:(1+M), "]", sep = "") else
+ paste("P[Y", .fillerChar , "<=", .fillerChar,
+ 1:M, "]", sep = "")
+
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
+ y.names <- paste("mu", 1:(M+1), sep = "")
+
+ if (ncol(cbind(w)) == 1) {
+ if (length(mustart) && all(c(y) %in% c(0, 1)))
+ for (iii in 1:ncol(y))
+ mustart[,iii] <- weighted.mean(y[,iii], w)
+ }
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 <- dimnames(y)[[2]]
+ }
+ }), list( .reverse = reverse, .mv = mv,
+ .link = link, .earg = earg,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
@@ -881,57 +940,61 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
linkinv = eval(substitute( function(eta, extra = NULL) {
answer =
if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- fv.matrix = matrix(0, nrow(eta), NOS*Llevels)
- for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels)
- if ( .reverse ) {
- ccump = cbind(1,
- eta2theta(eta[, cindex, drop = FALSE],
- .link , earg = .earg ))
- fv.matrix[,aindex] =
- cbind(-tapplymat1(ccump, "diff"),
- ccump[,ncol(ccump)])
- } else {
- cump = cbind(eta2theta(eta[, cindex, drop = FALSE],
- .link ,
- earg = .earg),
- 1)
- fv.matrix[,aindex] =
- cbind(cump[, 1], tapplymat1(cump, "diff"))
- }
- }
- fv.matrix
- } else {
- fv.matrix =
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ fv.matrix = matrix(0, nrow(eta), NOS*Llevels)
+ for (iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels)
if ( .reverse ) {
- ccump = cbind(1, eta2theta(eta, .link , earg = .earg))
- cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
+ ccump = cbind(1,
+ eta2theta(eta[, cindex, drop = FALSE],
+ .link , earg = .earg ))
+ fv.matrix[,aindex] =
+ cbind(-tapplymat1(ccump, "diff"),
+ ccump[, ncol(ccump)])
} else {
- cump = cbind(eta2theta(eta, .link , earg = .earg), 1)
- cbind(cump[, 1], tapplymat1(cump, "diff"))
+ cump = cbind(eta2theta(eta[, cindex, drop = FALSE],
+ .link ,
+ earg = .earg ),
+ 1)
+ fv.matrix[,aindex] =
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
}
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
- extra$dimnamesy2)
- fv.matrix
+ }
+ fv.matrix
+ } else {
+ fv.matrix =
+ if ( .reverse ) {
+ ccump = cbind(1, eta2theta(eta, .link , earg = .earg ))
+ cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
+ } else {
+ cump = cbind(eta2theta(eta, .link , earg = .earg ), 1)
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
+ }
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(dimnames(eta)[[1]],
+ extra$dimnamesy2)
+ fv.matrix
}
answer
- }, list( .link = link, .reverse = reverse,
- .earg = earg, .mv = mv ))),
+ }, list( .reverse = reverse,
+ .link = link, .earg = earg,
+ .mv = mv ))),
last = eval(substitute(expression({
if ( .mv ) {
misc$link = .link
misc$earg = list( .earg )
+
} else {
misc$link = rep( .link , length = M)
names(misc$link) = mynames
+
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for (ii in 1:M) misc$earg[[ii]] = .earg
+
}
misc$fillerChar = .fillerChar
@@ -941,57 +1004,60 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
misc$reverse = .reverse
misc$parallel = .parallel
misc$mv = .mv
- }), list( .link = link, .reverse = reverse, .parallel = parallel,
- .mv = mv, .earg = earg,
- .fillerChar = fillerChar,
+ }), list(
+ .reverse = reverse, .parallel = parallel,
+ .link = link, .earg = earg,
+ .fillerChar = fillerChar, .mv = mv,
.whitespace = whitespace ))),
- linkfun = eval(substitute( function(mu, extra = NULL) {
- answer =
- if ( .mv ) {
- NOS = extra$NOS
- Llevels = extra$Llevels
- eta.matrix = matrix(0, nrow(mu), NOS*(Llevels-1))
- for (iii in 1:NOS) {
- cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
- aindex = (iii-1)*(Llevels) + 1:(Llevels)
- cump = tapplymat1(as.matrix(mu[,aindex]), "cumsum")
- eta.matrix[,cindex] =
- theta2eta(if ( .reverse) 1-cump[, 1:(Llevels-1)] else
- cump[, 1:(Llevels-1)], .link , earg = .earg)
- }
- eta.matrix
- } else {
- cump = tapplymat1(as.matrix(mu), "cumsum")
- M = ncol(as.matrix(mu)) - 1
- theta2eta(if ( .reverse ) 1-cump[, 1:M] else cump[, 1:M],
- .link ,
- earg = .earg)
- }
- answer
- }, list( .link = link, .reverse = reverse, .earg = earg, .mv = mv ))),
- loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
- y * w # Convert proportions to counts
- nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
- round(w)
-
- smallno = 1.0e4 * .Machine$double.eps
- if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
- ycounts = round(ycounts)
-
- sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
- dmultinomial(x = ycounts, size = nvec, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
+ linkfun = eval(substitute( function(mu, extra = NULL) {
+ answer =
+ if ( .mv ) {
+ NOS = extra$NOS
+ Llevels = extra$Llevels
+ eta.matrix = matrix(0, nrow(mu), NOS*(Llevels-1))
+ for (iii in 1:NOS) {
+ cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
+ aindex = (iii-1)*(Llevels) + 1:(Llevels)
+ cump = tapplymat1(as.matrix(mu[,aindex]), "cumsum")
+ eta.matrix[,cindex] =
+ theta2eta(if ( .reverse ) 1-cump[, 1:(Llevels-1)] else
+ cump[, 1:(Llevels-1)], .link , earg = .earg )
+ }
+ eta.matrix
+ } else {
+ cump = tapplymat1(as.matrix(mu), "cumsum")
+ M = ncol(as.matrix(mu)) - 1
+ theta2eta(if ( .reverse ) 1-cump[, 1:M] else cump[, 1:M],
+ .link ,
+ earg = .earg )
+ }
+ answer
+ }, list(
+ .link = link, .earg = earg,
+ .reverse = reverse, .mv = mv ))),
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ ycounts = if (is.numeric(extra$orig.w)) y * w / extra$orig.w else
+ y * w # Convert proportions to counts
+ nvec = if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
+ round(w)
+
+ smallno = 1.0e4 * .Machine$double.eps
+ if (max(abs(ycounts - round(ycounts))) > smallno)
+ warning("converting 'ycounts' to integer in @loglikelihood")
+ ycounts = round(ycounts)
+
+ sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
+ dmultinomial(x = ycounts, size = nvec, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
vfamily = c("cumulative", "vcategorical"),
deriv = eval(substitute(expression({
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
- deriv.answer =
+ deriv.answer =
if ( .mv ) {
NOS = extra$NOS
Llevels = extra$Llevels
@@ -1000,22 +1066,24 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
cindex = (iii-1)*(Llevels-1) + 1:(Llevels-1)
aindex = (iii-1)*(Llevels) + 1:(Llevels-1)
cump = eta2theta(eta[,cindex, drop = FALSE],
- .link , earg = .earg)
- dcump.deta[,cindex] = dtheta.deta(cump, .link , earg = .earg)
+ .link , earg = .earg )
+ dcump.deta[,cindex] = dtheta.deta(cump, .link , earg = .earg )
resmat[,cindex] =
(y[,aindex, drop = FALSE] / mu.use[,aindex, drop = FALSE] -
y[, 1+aindex, drop = FALSE]/mu.use[, 1+aindex, drop = FALSE])
}
- (if ( .reverse) -c(w) else c(w)) * dcump.deta * resmat
+ (if ( .reverse ) -c(w) else c(w)) * dcump.deta * resmat
} else {
- cump = eta2theta(eta, .link , earg = .earg)
- dcump.deta = dtheta.deta(cump, .link , earg = .earg)
- c(if ( .reverse) -c(w) else c(w)) * dcump.deta *
- (y[, -(M+1)]/mu.use[, -(M+1)] - y[, -1] / mu.use[, -1])
+ cump <- eta2theta(eta, .link , earg = .earg )
+ dcump.deta <- dtheta.deta(cump, .link , earg = .earg )
+ c(if ( .reverse ) -c(w) else c(w)) *
+ dcump.deta *
+ (y[, -(M+1)] / mu.use[, -(M+1)] - y[, -1] / mu.use[, -1])
}
deriv.answer
- }), list( .link = link, .reverse = reverse,
- .earg = earg, .mv = mv ))),
+ }), list( .link = link, .earg = earg,
+ .reverse = reverse,
+ .mv = mv ))),
weight = eval(substitute(expression({
if ( .mv ) {
NOS = extra$NOS
@@ -1051,20 +1119,23 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
}
} else {
- wz = c(w) * dcump.deta^2 * (1/mu.use[, 1:M] + 1/mu.use[, -1])
+ wz <- c(w) * dcump.deta^2 * (1/mu.use[, 1:M] + 1/mu.use[, -1])
if (M > 1)
- wz = cbind(wz, -c(w) * dcump.deta[, -M] *
+ wz <- cbind(wz,
+ -c(w) * dcump.deta[, -M] *
dcump.deta[, 2:M] / mu.use[, 2:M])
}
wz
- }), list( .earg = earg, .link = link, .mv = mv ))))
+ }), list(
+ .earg = earg, .link = link,
+ .mv = mv ))))
}
- propodds = function(reverse = TRUE, whitespace = FALSE) {
+ propodds <- function(reverse = TRUE, whitespace = FALSE) {
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
@@ -1074,14 +1145,15 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
- acat = function(link = "loge", earg = list(),
- parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
+ acat <- function(link = "loge", parallel = FALSE,
+ reverse = FALSE, zero = NULL, whitespace = FALSE)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg))
- earg = list()
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
if (!is.logical(reverse) || length(reverse) != 1)
stop("argument 'reverse' must be a single logical")
@@ -1110,24 +1182,30 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
deviance = Deviance.categorical.data.vgam,
initialize = eval(substitute(expression({
+
+ if (is.factor(y) && !is.ordered(y))
+ warning("response should be ordinal---see ordered()")
+
+
+
delete.zero.colns = TRUE
eval(process.categorical.data.vgam)
M = ncol(y) - 1
- mynames = if ( .reverse )
- paste("P[Y", .fillerChar , "=",
- 1:M, "]", .fillerChar , "/", .fillerChar ,
- "P[Y", .fillerChar , "=", .fillerChar , 2:(M+1), "]",
- sep = "") else
- paste("P[Y", .fillerChar , "=", .fillerChar , 2:(M+1), "]",
- .fillerChar , "/", .fillerChar , "P[Y", .fillerChar ,
- "=", .fillerChar , 1:M, "]", sep = "")
-
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
- y.names = paste("mu", 1:(M+1), sep = "")
+ mynames = if ( .reverse )
+ paste("P[Y", .fillerChar , "=",
+ 1:M, "]", .fillerChar , "/", .fillerChar ,
+ "P[Y", .fillerChar , "=", .fillerChar , 2:(M+1), "]",
+ sep = "") else
+ paste("P[Y", .fillerChar , "=", .fillerChar , 2:(M+1), "]",
+ .fillerChar , "/", .fillerChar , "P[Y", .fillerChar ,
+ "=", .fillerChar , 1:M, "]", sep = "")
+
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
+ y.names = paste("mu", 1:(M+1), sep = "")
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
}), list( .earg = earg, .link = link, .reverse = reverse,
.fillerChar = fillerChar,
.whitespace = whitespace ))),
@@ -1169,7 +1247,8 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
linkfun = eval(substitute( function(mu, extra = NULL) {
M = ncol(mu) - 1
theta2eta(if ( .reverse ) mu[, 1:M] / mu[, -1] else
- mu[, -1] / mu[, 1:M], .link , earg = .earg )
+ mu[, -1] / mu[, 1:M],
+ .link , earg = .earg )
}, list( .earg = earg, .link = link, .reverse = reverse) )),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL)
@@ -1191,33 +1270,42 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
},
vfamily = c("acat", "vcategorical"),
deriv = eval(substitute(expression({
- zeta = eta2theta(eta, .link , earg = .earg ) # May be zetar
- d1 = acat.deriv(zeta, M = M, n = n, reverse=.reverse)
- score = attr(d1, "gradient") / d1
- dzeta.deta = dtheta.deta(zeta, .link , earg = .earg )
+ zeta <- eta2theta(eta, .link , earg = .earg ) # May be zetar
+
+ dzeta.deta <- dtheta.deta(zeta, .link , earg = .earg )
+
+ d1 <- acat.deriv(zeta, M = M, n = n, reverse = .reverse )
+ score <- attr(d1, "gradient") / d1
+
+
+ answer <-
if ( .reverse ) {
cumy = tapplymat1(y, "cumsum")
c(w) * dzeta.deta * (cumy[, 1:M] / zeta - score)
} else {
- ccumy = tapplymat1(y[,ncol(y):1], "cumsum")[,ncol(y):1]
+ ccumy = tapplymat1(y[, ncol(y):1], "cumsum")[, ncol(y):1]
c(w) * dzeta.deta * (ccumy[, -1] / zeta - score)
}
+
+
+ answer
}), list( .earg = earg, .link = link, .reverse = reverse) )),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M))
+ wz = matrix(as.numeric(NA), n, dimm(M))
- hess = attr(d1, "hessian") / d1
+ hess = attr(d1, "hessian") / d1
if (M > 1)
for (jay in 1:(M-1))
for (kay in (jay+1):M)
- wz[,iam(jay,kay,M)] = (hess[,jay,kay] - score[,jay] *
- score[,kay]) * dzeta.deta[,jay] * dzeta.deta[,kay]
+ wz[,iam(jay, kay,M)] <-
+ (hess[, jay, kay] - score[, jay] * score[, kay]) *
+ dzeta.deta[, jay] * dzeta.deta[, kay]
if ( .reverse ) {
cump = tapplymat1(mu, "cumsum")
wz[, 1:M] = (cump[, 1:M] / zeta^2 - score^2) * dzeta.deta^2
} else {
- ccump = tapplymat1(mu[,ncol(mu):1], "cumsum")[, ncol(mu):1]
+ ccump = tapplymat1(mu[, ncol(mu):1], "cumsum")[, ncol(mu):1]
wz[, 1:M] = (ccump[, -1] / zeta^2 - score^2) * dzeta.deta^2
}
c(w) * wz
@@ -1225,42 +1313,43 @@ vglm.vcategorical.control = function(maxit = 30, trace = FALSE,
}
-acat.deriv = function(zeta, reverse, M, n)
+acat.deriv <- function(zeta, reverse, M, n)
{
- alltxt = NULL
- for (ii in 1:M) {
- index = if (reverse) ii:M else 1:ii
- vars = paste("zeta", index, sep = "")
- txt = paste(vars, collapse = "*")
- alltxt = c(alltxt, txt)
- }
- alltxt = paste(alltxt, collapse = " + ")
- alltxt = paste(" ~ 1 +", alltxt)
- txt = as.formula(alltxt)
+ alltxt = NULL
+ for (ii in 1:M) {
+ index = if (reverse) ii:M else 1:ii
+ vars = paste("zeta", index, sep = "")
+ txt = paste(vars, collapse = "*")
+ alltxt = c(alltxt, txt)
+ }
+ alltxt = paste(alltxt, collapse = " + ")
+ alltxt = paste(" ~ 1 +", alltxt)
+ txt = as.formula(alltxt)
- allvars = paste("zeta", 1:M, sep = "")
- d1 = deriv3(txt, allvars, hessian = TRUE)
+ allvars = paste("zeta", 1:M, sep = "")
+ d1 = deriv3(txt, allvars, hessian = TRUE)
- zeta = as.matrix(zeta)
- for (ii in 1:M)
- assign(paste("zeta", ii, sep = ""), zeta[, ii])
+ zeta = as.matrix(zeta)
+ for (ii in 1:M)
+ assign(paste("zeta", ii, sep = ""), zeta[, ii])
- ans = eval(d1)
- ans
+ ans = eval(d1)
+ ans
}
- brat = function(refgp = "last",
- refvalue = 1,
- init.alpha = 1)
+ brat <- function(refgp = "last",
+ refvalue = 1,
+ init.alpha = 1)
{
if (!is.Numeric(init.alpha, positive = TRUE))
stop("'init.alpha' must contain positive values only")
if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
stop("'refvalue' must be a single positive value")
+
if (!is.character(refgp) &&
!is.Numeric(refgp, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
@@ -1282,7 +1371,8 @@ acat.deriv = function(zeta, reverse, M, n)
stop("cannot determine 'M'")
init.alpha = matrix( rep( .init.alpha , length.out = M),
n, M, byrow = TRUE)
- etastart = matrix(theta2eta(init.alpha, "loge", earg = list()),
+ etastart <- matrix(theta2eta(init.alpha, "loge",
+ earg = list(theta = NULL)),
n, M, byrow = TRUE)
refgp = .refgp
if (!intercept.only)
@@ -1290,15 +1380,16 @@ acat.deriv = function(zeta, reverse, M, n)
extra$ybrat.indices = .brat.indices(NCo = M+1, are.ties = FALSE)
uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
- predictors.names =
+ predictors.names <-
namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE)
}), list( .refgp = refgp, .init.alpha=init.alpha ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
probs = NULL
- eta = as.matrix(eta) # in case M=1
+ eta = as.matrix(eta) # in case M = 1
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge", earg = list()),
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
.refvalue , .refgp )
alpha1 = alpha[extra$ybrat.indices[, "rindex"]]
alpha2 = alpha[extra$ybrat.indices[, "cindex"]]
@@ -1309,10 +1400,16 @@ acat.deriv = function(zeta, reverse, M, n)
}, list( .refgp = refgp, .refvalue = refvalue) )),
last = eval(substitute(expression({
- misc$link = rep( "loge", length = M)
- names(misc$link) = paste("alpha", uindex, sep = "")
- misc$refgp = .refgp
- misc$refvalue = .refvalue
+ misc$link <- rep( "loge", length = M)
+ names(misc$link) <- paste("alpha", uindex, sep = "")
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- list(theta = NULL)
+
+ misc$refgp <- .refgp
+ misc$refvalue <- .refvalue
}), list( .refgp = refgp, .refvalue = refvalue ))),
loglikelihood =
@@ -1337,9 +1434,10 @@ acat.deriv = function(zeta, reverse, M, n)
deriv = eval(substitute(expression({
ans = NULL
uindex = if ( .refgp == "last") 1:M else (1:(M+1))[-( .refgp ) ]
- eta = as.matrix(eta) # in case M=1
+ eta = as.matrix(eta) # in case M = 1
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge", earg = list()),
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
ymat = InverseBrat(y[ii,], NCo = M+1, diag = 0)
answer = rep(0, len = M)
@@ -1356,7 +1454,8 @@ acat.deriv = function(zeta, reverse, M, n)
weight = eval(substitute(expression({
wz = matrix(0, n, dimm(M))
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii,], "loge", earg = list()),
+ alpha = .brat.alpha(eta2theta(eta[ii,], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
ymat = InverseBrat(y[ii,], NCo = M+1, diag = 0)
for (aa in 1:(M+1)) {
@@ -1381,10 +1480,10 @@ acat.deriv = function(zeta, reverse, M, n)
- bratt = function(refgp = "last",
- refvalue = 1,
- init.alpha = 1,
- i0 = 0.01)
+ bratt <- function(refgp = "last",
+ refvalue = 1,
+ init.alpha = 1,
+ i0 = 0.01)
{
if (!is.Numeric(i0, allowable.length = 1, positive = TRUE))
stop("'i0' must be a single positive value")
@@ -1392,6 +1491,7 @@ acat.deriv = function(zeta, reverse, M, n)
stop("'init.alpha' must contain positive values only")
if (!is.Numeric(refvalue, allowable.length = 1, positive = TRUE))
stop("'refvalue' must be a single positive value")
+
if (!is.character(refgp) &&
!is.Numeric(refgp, allowable.length = 1,
integer.valued = TRUE, positive = TRUE))
@@ -1421,20 +1521,24 @@ acat.deriv = function(zeta, reverse, M, n)
init.alpha = rep( .init.alpha, len = NCo-1)
ialpha0 = .i0
- etastart =
- cbind(matrix(theta2eta(init.alpha, "loge"),
+ etastart <-
+ cbind(matrix(theta2eta(init.alpha,
+ "loge",
+ list(theta = NULL)),
n, NCo-1, byrow = TRUE),
- theta2eta( rep(ialpha0, length.out = n), "loge"))
+ theta2eta(rep(ialpha0, length.out = n),
+ "loge",
+ list(theta = NULL)))
refgp = .refgp
if (!intercept.only)
warning("this function only works with intercept-only models")
- extra$ties = ties # Flat (1-row) matrix
+ extra$ties = ties # Flat (1-row) matrix
extra$ybrat.indices = .brat.indices(NCo=NCo, are.ties = FALSE)
extra$tbrat.indices = .brat.indices(NCo=NCo, are.ties = TRUE) # unused
extra$dnties = dimnames(ties)
uindex = if (refgp == "last") 1:(NCo-1) else (1:(NCo))[-refgp ]
- predictors.names = c(
+ predictors.names <- c(
namesof(paste("alpha", uindex, sep = ""), "loge", short = TRUE),
namesof("alpha0", "loge", short = TRUE))
}), list( .refgp = refgp,
@@ -1445,9 +1549,10 @@ acat.deriv = function(zeta, reverse, M, n)
probs = qprobs = NULL
M = ncol(eta)
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge"),
+ alpha = .brat.alpha(eta2theta(eta[ii, -M],
+ "loge"),
.refvalue , .refgp )
- alpha0 = eta2theta(eta[ii, M], "loge")
+ alpha0 = loge(eta[ii, M], inverse = TRUE)
alpha1 = alpha[extra$ybrat.indices[, "rindex"]]
alpha2 = alpha[extra$ybrat.indices[, "cindex"]]
probs = rbind(probs, alpha1 / (alpha1+alpha2+alpha0)) #
@@ -1460,7 +1565,15 @@ acat.deriv = function(zeta, reverse, M, n)
}, list( .refgp = refgp, .refvalue = refvalue) )),
last = eval(substitute(expression({
misc$link = rep( "loge", length = M)
- names(misc$link) = c(paste("alpha",uindex, sep = ""), "alpha0")
+ names(misc$link) = c(paste("alpha", uindex, sep = ""), "alpha0")
+
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for (ii in 1:M)
+ misc$earg[[ii]] <- list(theta = NULL)
+
+
misc$refgp = .refgp
misc$refvalue = .refvalue
misc$alpha = alpha
@@ -1481,9 +1594,10 @@ acat.deriv = function(zeta, reverse, M, n)
uindex = if ( .refgp == "last") 1:(M-1) else (1:(M))[-( .refgp )]
eta = as.matrix(eta)
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge"),
+ alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
- alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
+ alpha0 = loge(eta[ii, M], inverse = TRUE)
ymat = InverseBrat(y[ii,], NCo = M, diag = 0)
tmat = InverseBrat(ties[ii,], NCo = M, diag = 0)
answer = rep(0, len=NCo-1) # deriv wrt eta[-M]
@@ -1511,9 +1625,10 @@ acat.deriv = function(zeta, reverse, M, n)
weight = eval(substitute(expression({
wz = matrix(0, n, dimm(M)) # includes diagonal
for (ii in 1:nrow(eta)) {
- alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge"),
+ alpha = .brat.alpha(eta2theta(eta[ii, -M], "loge",
+ earg = list(theta = NULL)),
.refvalue, .refgp)
- alpha0 = eta2theta(eta[ii,M], "loge") # M == ncol(eta)
+ alpha0 = loge(eta[ii, M], inverse = TRUE)
ymat = InverseBrat(y[ii,], NCo = M, diag = 0)
tmat = InverseBrat(ties[ii,], NCo = M, diag = 0)
@@ -1545,7 +1660,7 @@ acat.deriv = function(zeta, reverse, M, n)
}
for (sss in 1:length(uindex)) {
jay = uindex[sss]
- naj = ymat[,jay] + ymat[jay,] + tmat[,jay]
+ naj = ymat[, jay] + ymat[jay,] + tmat[, jay]
Daj = alpha[jay] + alpha + alpha0
wz[ii,iam(sss, NCo, M = NCo, diag = TRUE)] =
-alpha[jay] * alpha0 * sum(naj / Daj^2)
@@ -1557,7 +1672,7 @@ acat.deriv = function(zeta, reverse, M, n)
}
-.brat.alpha = function(vec, value, posn) {
+.brat.alpha <- function(vec, value, posn) {
if (is.character(posn))
if (posn != "last")
stop("can only handle \"last\"") else return(c(vec, value))
@@ -1566,7 +1681,7 @@ acat.deriv = function(zeta, reverse, M, n)
}
-.brat.indices = function(NCo, are.ties = FALSE) {
+.brat.indices <- function(NCo, are.ties = FALSE) {
if (!is.Numeric(NCo, allowable.length = 1,
integer.valued = TRUE) ||
NCo < 2)
@@ -1581,7 +1696,7 @@ acat.deriv = function(zeta, reverse, M, n)
}
- Brat = function(mat, ties = 0 * mat, string = c(">", "=="),
+ Brat <- function(mat, ties = 0 * mat, string = c(">", "=="),
whitespace = FALSE) {
@@ -1630,7 +1745,7 @@ acat.deriv = function(zeta, reverse, M, n)
-InverseBrat = function(yvec, NCo =
+InverseBrat <- function(yvec, NCo =
(1:900)[(1:900)*((1:900)-1) == ncol(rbind(yvec))],
multiplicity = if (is.matrix(yvec)) nrow(yvec) else 1,
diag = NA, string = c(">","=="),
@@ -1677,7 +1792,7 @@ InverseBrat = function(yvec, NCo =
-tapplymat1 = function(mat,
+tapplymat1 <- function(mat,
function.arg = c("cumsum", "diff", "cumprod"))
{
@@ -1707,13 +1822,18 @@ tapplymat1 = function(mat,
- ordpoisson = function(cutpoints,
+ ordpoisson <- function(cutpoints,
countdata = FALSE, NOS = NULL, Levels = NULL,
init.mu = NULL, parallel = FALSE, zero = NULL,
- link = "loge", earg = list()) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link = "loge") {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+
+
fcutpoints = cutpoints[is.finite(cutpoints)]
if (!is.Numeric(fcutpoints, integer.valued = TRUE) ||
any(fcutpoints < 0))
@@ -1734,6 +1854,7 @@ tapplymat1 = function(mat,
Levels = rep(Levels, length=NOS)
}
+
new("vglmff",
blurb = c(paste("Ordinal Poisson model\n\n"),
"Link: ", namesof("mu", link, earg = earg)),
@@ -1768,7 +1889,7 @@ tapplymat1 = function(mat,
cutpoints = rep( .cutpoints, len=sum(Levels))
delete.zero.colns = FALSE
use.y = if ( .countdata ) y else matrix(0, n, sum(Levels))
- use.etastart = matrix(0, n, M)
+ use.etastart <- matrix(0, n, M)
cptr = 1
for (iii in 1:NOS) {
y = factor(orig.y[,iii], levels=(1:Levels[iii]))
@@ -1776,13 +1897,13 @@ tapplymat1 = function(mat,
eval(process.categorical.data.vgam) # Creates mustart and y
use.y[,cptr:(cptr+Levels[iii]-1)] = y
}
- use.etastart[,iii] = if (is.Numeric(initmu))
+ use.etastart[,iii] <- if (is.Numeric(initmu))
initmu[iii] else
median(cutpoints[cptr:(cptr+Levels[iii]-1-1)])
cptr = cptr + Levels[iii]
}
mustart = NULL # Overwrite it
- etastart = theta2eta(use.etastart, .link , earg = .earg)
+ etastart <- theta2eta(use.etastart, .link , earg = .earg )
y = use.y # n x sum(Levels)
M = NOS
for (iii in 1:NOS) {
@@ -1795,14 +1916,14 @@ tapplymat1 = function(mat,
extra$cutpoints = cp.vector
extra$n = n
mynames = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- predictors.names =
- namesof(mynames, .link , short = TRUE, earg = .earg)
+ predictors.names <-
+ namesof(mynames, .link , short = TRUE, earg = .earg )
}), list( .link = link, .countdata = countdata, .earg = earg,
.cutpoints=cutpoints, .NOS=NOS, .Levels=Levels,
.init.mu = init.mu
))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- mu = eta2theta(eta, link= .link , earg = .earg) # Poisson means
+ mu = eta2theta(eta, link= .link , earg = .earg ) # Poisson means
mu = cbind(mu)
mu
}, list( .link = link, .earg = earg, .countdata = countdata ))),
@@ -1843,7 +1964,7 @@ tapplymat1 = function(mat,
Levels = extra$Levels
resmat = matrix(0, n, M)
dl.dprob = y / probs.use
- dmu.deta = dtheta.deta(mu, .link , earg = .earg)
+ dmu.deta = dtheta.deta(mu, .link , earg = .earg )
dprob.dmu = ordpoissonProbs(extra, mu, deriv = 1)
cptr = 1
for (iii in 1:NOS) {
@@ -1873,7 +1994,7 @@ tapplymat1 = function(mat,
-ordpoissonProbs = function(extra, mu, deriv = 0) {
+ordpoissonProbs <- function(extra, mu, deriv = 0) {
cp.vector = extra$cutpoints
NOS = extra$NOS
if (deriv == 1) {
@@ -1918,22 +2039,28 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
if (FALSE)
- scumulative = function(link = "logit", earg = list(),
- lscale = "loge", escale = list(),
- parallel = FALSE, sparallel = TRUE, reverse = FALSE,
- iscale = 1)
+ scumulative <- function(link = "logit", earg = list(),
+ lscale = "loge", escale = list(),
+ parallel = FALSE, sparallel = TRUE,
+ reverse = FALSE,
+ iscale = 1)
{
- stop("sorry, not working yet")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.list(escale)) escale = list()
- if (!is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- if (!is.logical(reverse) || length(reverse) != 1)
- stop("argument 'reverse' must be a single logical")
+ stop("sorry, not working yet")
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ if (!is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+ if (!is.logical(reverse) || length(reverse) != 1)
+ stop("argument 'reverse' must be a single logical")
new("vglmff",
blurb = c(paste("Scaled cumulative", link, "model\n\n"),
@@ -1960,62 +2087,73 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
for (ii in 1:length(constraints))
constraints[[ii]] =
- (constraints[[ii]])[interleave.VGAM(M, M=2),, drop = FALSE]
- }), list( .parallel = parallel, .sparallel=sparallel ))),
- deviance = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- answer =
- Deviance.categorical.data.vgam(mu = mu,
- y = y, w = w, residuals = residuals,
- eta = eta, extra = extra)
- answer
- }, list( .earg = earg, .link = link ) )),
- initialize = eval(substitute(expression({
- if (intercept.only)
- stop("use cumulative() for intercept-only models")
- delete.zero.colns = TRUE # Cannot have FALSE since then prob(Y=jay)=0
- eval(process.categorical.data.vgam)
- M = 2*(ncol(y)-1)
- J = M / 2
- extra$J = J
- mynames = if ( .reverse )
- paste("P[Y>=", 2:(1+J), "]", sep = "") else
- paste("P[Y<=", 1:J, "]", sep = "")
- predictors.names = c(
- namesof(mynames, .link , short = TRUE, earg = .earg),
- namesof(paste("scale_", 1:J, sep = ""),
- .lscale, short = TRUE, earg = .escale))
- y.names = paste("mu", 1:(J+1), sep = "")
+ (constraints[[ii]])[interleave.VGAM(M, M = 2),, drop = FALSE]
+ }), list( .parallel = parallel, .sparallel=sparallel ))),
+ deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ answer =
+ Deviance.categorical.data.vgam(mu = mu,
+ y = y, w = w, residuals = residuals,
+ eta = eta, extra = extra)
+ answer
+ }, list( .earg = earg, .link = link ) )),
+ initialize = eval(substitute(expression({
- if (length(dimnames(y)))
- extra$dimnamesy2 = dimnames(y)[[2]]
+ if (is.factor(y) && !is.ordered(y))
+ warning("response should be ordinal---see ordered()")
- predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
- }), list( .link = link, .lscale = lscale, .reverse = reverse,
- .earg = earg, .escale = escale ))),
- linkinv = eval(substitute( function(eta, extra = NULL) {
- J = extra$J
- M = 2*J
- etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
- etamat2 = eta[, 2*(1:J), drop = FALSE]
- scalemat = eta2theta(etamat2, .lscale, earg = .escale)
- fv.matrix =
- if ( .reverse ) {
- ccump = cbind(1,
- eta2theta(etamat1 / scalemat,
- .link , earg = .earg))
- cbind(-tapplymat1(ccump, "diff"), ccump[,ncol(ccump)])
- } else {
- cump = cbind(eta2theta(etamat1 / scalemat,
- .link , earg = .earg),
- 1)
- cbind(cump[, 1], tapplymat1(cump, "diff"))
- }
- if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(eta)[[1]],
- extra$dimnamesy2)
- fv.matrix
+ if (intercept.only)
+ stop("use cumulative() for intercept-only models")
+
+
+ delete.zero.colns = TRUE # Cannot have FALSE since then prob(Y=jay)=0
+ eval(process.categorical.data.vgam)
+
+
+ M = 2*(ncol(y)-1)
+ J = M / 2
+ extra$J = J
+ mynames = if ( .reverse )
+ paste("P[Y>=", 2:(1+J), "]", sep = "") else
+ paste("P[Y<=", 1:J, "]", sep = "")
+ predictors.names <- c(
+ namesof(mynames, .link , short = TRUE, earg = .earg ),
+ namesof(paste("scale_", 1:J, sep = ""),
+ .lscale, short = TRUE, earg = .escale ))
+
+
+ y.names = paste("mu", 1:(J+1), sep = "")
+
+ if (length(dimnames(y)))
+ extra$dimnamesy2 = dimnames(y)[[2]]
+
+ predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
+
+ }), list( .link = link, .lscale = lscale, .reverse = reverse,
+ .earg = earg, .escale = escale ))),
+ linkinv = eval(substitute( function(eta, extra = NULL) {
+ J = extra$J
+ M = 2*J
+ etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
+ etamat2 = eta[, 2*(1:J), drop = FALSE]
+ scalemat = eta2theta(etamat2, .lscale, earg = .escale )
+ fv.matrix =
+ if ( .reverse ) {
+ ccump = cbind(1,
+ eta2theta(etamat1 / scalemat,
+ .link , earg = .earg ))
+ cbind(-tapplymat1(ccump, "diff"), ccump[, ncol(ccump)])
+ } else {
+ cump = cbind(eta2theta(etamat1 / scalemat,
+ .link , earg = .earg ),
+ 1)
+ cbind(cump[, 1], tapplymat1(cump, "diff"))
+ }
+ if (length(extra$dimnamesy2))
+ dimnames(fv.matrix) = list(dimnames(eta)[[1]],
+ extra$dimnamesy2)
+ fv.matrix
}, list( .link = link, .lscale = lscale, .reverse = reverse,
.earg = earg, .escale = escale ))),
last = eval(substitute(expression({
@@ -2043,10 +2181,10 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
answer = cbind(
theta2eta(if ( .reverse ) 1-cump[, 1:J] else cump[, 1:J],
.link ,
- earg = .earg),
+ earg = .earg ),
matrix(theta2eta( .iscale, .lscale , earg = .escale ),
nrow(as.matrix(mu)), J, byrow = TRUE))
- answer = answer[,interleave.VGAM(M, M=2)]
+ answer = answer[,interleave.VGAM(M, M = 2)]
answer
}, list( .link = link, .lscale = lscale, .reverse = reverse,
.iscale = iscale, .earg = earg, .escale = escale ))),
@@ -2061,7 +2199,7 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
smallno = 1.0e4 * .Machine$double.eps
if (max(abs(ycounts - round(ycounts))) > smallno)
- warning("converting 'ycounts' to integer in @loglikelihood")
+ warning("converting 'ycounts' to integer in @loglikelihood")
ycounts = round(ycounts)
sum((if (is.numeric(extra$orig.w)) extra$orig.w else 1) *
@@ -2076,19 +2214,20 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
mu.use = pmax(mu, .Machine$double.eps * 1.0e-0)
etamat1 = eta[, 2*(1:J)-1, drop = FALSE]
- etamat2 = eta[, 2*(1:J), drop = FALSE]
- scalemat = eta2theta(etamat2, .lscale, earg = .escale)
+ etamat2 = eta[, 2*(1:J) , drop = FALSE]
+ scalemat = eta2theta(etamat2, .lscale, earg = .escale )
- cump = eta2theta(etamat1 / scalemat, .link , earg = .earg)
- dcump.deta = dtheta.deta(cump, .link , earg = .earg)
- dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale)
- dl.dcump = (if ( .reverse) -w else w) *
+ cump = eta2theta(etamat1 / scalemat, .link , earg = .earg )
+ dcump.deta = dtheta.deta(cump, .link , earg = .earg )
+ dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale )
+ dl.dcump = (if ( .reverse ) -w else w) *
(y[, 1:J]/mu.use[, 1:J] - y[, -1]/mu.use[, -1])
dcump.dscale = -dcump.deta * etamat1 / scalemat^2
ans = cbind(dl.dcump * dcump.deta / scalemat,
dl.dcump * dcump.dscale * dscale.deta)
- ans = ans[,interleave.VGAM(M, M=2)]
- if (ooz) ans[,c(TRUE,FALSE)] = 0 else ans[,c(FALSE,TRUE)] = 0
+ ans = ans[,interleave.VGAM(M, M = 2)]
+ if (ooz) ans[, c(TRUE, FALSE)] = 0 else
+ ans[, c(FALSE, TRUE)] = 0
ans
}), list( .link = link, .lscale = lscale, .reverse = reverse,
.earg = earg, .escale = escale ))),
@@ -2146,7 +2285,7 @@ ordpoissonProbs = function(extra, mu, deriv = 0) {
-margeff = function(object, subset = NULL) {
+ margeff <- function(object, subset = NULL) {
ii = ii.save = subset
@@ -2287,7 +2426,7 @@ margeff = function(object, subset = NULL) {
-prplot = function(object,
+prplot <- function(object,
control = prplot.control(...), ...) {
@@ -2346,7 +2485,7 @@ prplot = function(object,
- prplot.control = function(xlab = NULL, ylab = "Probability",
+ prplot.control <- function(xlab = NULL, ylab = "Probability",
main = NULL,
xlim = NULL, ylim = NULL,
lty = par()$lty,
@@ -2369,3 +2508,91 @@ prplot = function(object,
+
+
+
+
+is.parallel.matrix <- function(object, ...)
+ is.matrix(object) && all(!is.na(object)) &&
+ all(c(object) == 1) && ncol(object) == 1
+
+
+is.parallel.vglm <- function(object, type = c("term", "lm"), ...) {
+
+ type <- match.arg(type, c("term", "lm"))[1]
+ Hlist <- constraints(object, type = type)
+
+ unlist(lapply(Hlist, is.parallel.matrix))
+}
+
+
+if (!isGeneric("is.parallel"))
+ setGeneric("is.parallel", function(object, ...)
+ standardGeneric("is.parallel"),
+ package = "VGAM")
+
+
+setMethod("is.parallel", "matrix", function(object, ...)
+ is.parallel.matrix(object, ...))
+
+
+setMethod("is.parallel", "vglm", function(object, ...)
+ is.parallel.vglm(object, ...))
+
+
+
+
+is.zero.matrix <- function(object, ...) {
+
+ rnames <- rownames(object)
+ intercept.index <- if (length(rnames)) {
+ if (any(rnames == "(Intercept)")) {
+ (1:length(rnames))[rnames == "(Intercept)"]
+ } else {
+ stop("the matrix does not seem to have an intercept")
+ NULL
+ }
+ } else {
+ stop("the matrix does not seem to have an intercept")
+ NULL
+ }
+
+ if (nrow(object) <= 1)
+ stop("the matrix needs to have more than one row, i.e., more than ",
+ "an intercept on the RHS of the formula")
+
+ cfit <- object[-intercept.index, , drop = FALSE]
+
+ foo <- function(conmat.col)
+ all(!is.na(conmat.col)) &&
+ all(c(conmat.col) == 0)
+
+ unlist(apply(cfit, 2, foo))
+}
+
+
+is.zero.vglm <- function(object, ...) {
+ is.zero.matrix(coef(object, matrix = TRUE))
+}
+
+
+if (!isGeneric("is.zero"))
+ setGeneric("is.zero", function(object, ...)
+ standardGeneric("is.zero"),
+ package = "VGAM")
+
+
+setMethod("is.zero", "matrix", function(object, ...)
+ is.zero.matrix(object, ...))
+
+
+setMethod("is.zero", "vglm", function(object, ...)
+ is.zero.vglm(object, ...))
+
+
+
+
+
+
+
+
diff --git a/R/family.censored.R b/R/family.censored.R
index 327f388..25cbdc1 100644
--- a/R/family.censored.R
+++ b/R/family.censored.R
@@ -12,288 +12,317 @@
- cenpoisson = function(link = "loge", earg = list(), imu = NULL) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg))
- earg = list()
-
- new("vglmff",
- blurb = c("Censored Poisson distribution\n\n",
- "Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu"),
- initialize = eval(substitute(expression({
- if (any(is.na(y)))
- stop("NAs are not allowed in the response")
-
- if (any(y != round(y)))
- warning("the response should be integer-valued")
- centype = attr(y, "type")
- if (centype == "right") {
- temp = y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = rep(FALSE, len = n)
- extra$interval = rep(FALSE, len = n)
- init.mu = pmax(y[,1], 1/8)
- } else
- if (centype == "left") {
- temp = y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = rep(FALSE, len = n)
- extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
- init.mu = pmax(y[,1], 1/8)
- } else
- if (centype == "interval" || centype == "interval2") {
- temp = y[, 3]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
- extra$intervalcensored = ifelse(temp == 3, TRUE, FALSE)
- init.mu = pmax((y[,1] + y[,2])/2, 1/8) # for intervalcensored
- if (any(extra$uncensored))
- init.mu[extra$uncensored] = pmax(y[extra$uncensored,1], 1/8)
- if (any(extra$rightcensored))
- init.mu[extra$rightcensored] = pmax(y[extra$rightcensored,1], 1/8)
- if (any(extra$leftcensored))
- init.mu[extra$leftcensored] = pmax(y[extra$leftcensored,1], 1/8)
- } else
- if (centype == "counting") {
- stop("type == 'counting' not compatible with cenpoisson()")
- init.mu = pmax(y[,1], 1/8)
- stop("currently not working")
- } else
- stop("response have to be in a class of SurvS4")
-
- if (length( .imu )) init.mu = 0 * y[,1] + .imu
-
- predictors.names = namesof("mu", .link, earg = .earg, short = TRUE)
- if (!length(etastart))
- etastart = theta2eta(init.mu, link = .link, earg = .earg)
- }), list( .link = link, .earg = earg, .imu = imu))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
- mu
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$expected = FALSE
- misc$link = c("mu" = .link)
- misc$earg = list("mu" = .earg)
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta,
- extra = NULL) {
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$intervalcensored
- if (residuals){
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w[cen0] * dpois(y[cen0,1], mu[cen0], log = TRUE)) +
- sum(w[cenU] * log1p(-ppois(y[cenU,1] - 1, mu[cenU]))) +
- sum(w[cenL] * ppois(y[cenL,1] - 1, mu[cenL], log.p = TRUE)) +
- sum(w[cenI] * log(ppois(y[cenI,2], mu[cenI]) -
- ppois(y[cenI,1], mu[cenI])))
- }
- },
- vfamily = "cenpoisson",
- deriv = eval(substitute(expression({
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$intervalcensored
- lambda = eta2theta(eta, link = .link, earg = .earg)
- dl.dlambda = (y[,1] - lambda)/lambda # uncensored
- yllim = yulim = y[,1] # uncensored
- if (any(cenU)) {
- yllim[cenU] = y[cenU,1]
- densm1 = dpois(yllim-1, lambda)
- queue = ppois(yllim-1, lambda, lower.tail = FALSE)
- dl.dlambda[cenU] = densm1[cenU] / queue[cenU]
- }
- if (any(cenL)) {
- yulim[cenL] = y[cenL,1] - 1
- densm0 = dpois(yulim, lambda)
- Queue = ppois(yulim, lambda) # Left tail probability
- dl.dlambda[cenL] = -densm0[cenL] / Queue[cenL]
- }
- if (any(cenI)) {
- yllim[cenI] = y[cenI,1] + 1
- yulim[cenI] = y[cenI,2]
- Queue1 = ppois(yllim-1, lambda)
- Queue2 = ppois(yulim, lambda)
- densm02 = dpois(yulim, lambda)
- densm12 = dpois(yllim-1, lambda)
- dl.dlambda[cenI] =
- (-densm02[cenI]+densm12[cenI]) / (Queue2[cenI]-Queue1[cenI])
- }
- dlambda.deta = dtheta.deta(theta=lambda, link= .link, earg = .earg)
- w * dl.dlambda * dlambda.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(theta=lambda, link= .link, earg = .earg)
- d2l.dlambda2 = 1 / lambda # uncensored; Fisher scoring
- if (any(cenU)) {
- densm2 = dpois(yllim-2, lambda)
- d2l.dlambda2[cenU] = (dl.dlambda[cenU])^2 -
- (densm2[cenU]-densm1[cenU])/queue[cenU]
- }
- if (any(cenL)) {
- densm1 = dpois(yulim-1, lambda)
- d2l.dlambda2[cenL] = (dl.dlambda[cenL])^2 -
- (densm0[cenL]-densm1[cenL])/Queue[cenL]
- }
- if (any(cenI)) {
- densm03 = dpois(yulim-1, lambda)
- densm13 = dpois(yllim-2, lambda)
- d2l.dlambda2[cenI] = (dl.dlambda[cenI])^2 -
- (densm13[cenI]-densm12[cenI]-densm03[cenI] +
- densm02[cenI]) / (Queue2[cenI]-Queue1[cenI])
- }
- wz = w *((dlambda.deta^2) * d2l.dlambda2)
- wz
- }), list( .link = link, .earg = earg ))))
+ cenpoisson <- function(link = "loge", imu = NULL) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Censored Poisson distribution\n\n",
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu"),
+ initialize = eval(substitute(expression({
+ if (any(is.na(y)))
+ stop("NAs are not allowed in the response")
+
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 3,
+ Is.integer.y = TRUE)
+
+
+ centype = attr(y, "type")
+
+ if (centype == "right") {
+ temp = y[, 2]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored = rep(FALSE, len = n)
+ extra$interval = rep(FALSE, len = n)
+ init.mu = pmax(y[, 1], 1/8)
+ } else
+ if (centype == "left") {
+ temp = y[, 2]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = rep(FALSE, len = n)
+ extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$interval = rep(FALSE, len = n)
+ init.mu = pmax(y[, 1], 1/8)
+ } else
+ if (centype == "interval" ||
+ centype == "interval2") {
+ temp = y[, 3]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
+ extra$intervalcensored = ifelse(temp == 3, TRUE, FALSE)
+ init.mu = pmax((y[, 1] + y[, 2])/2, 1/8) # for intervalcensored
+ if (any(extra$uncensored))
+ init.mu[extra$uncensored] = pmax(y[extra$uncensored, 1], 1/8)
+ if (any(extra$rightcensored))
+ init.mu[extra$rightcensored] = pmax(y[extra$rightcensored, 1], 1/8)
+ if (any(extra$leftcensored))
+ init.mu[extra$leftcensored] = pmax(y[extra$leftcensored, 1], 1/8)
+ } else
+ if (centype == "counting") {
+ stop("type == 'counting' not compatible with cenpoisson()")
+ init.mu = pmax(y[, 1], 1/8)
+ stop("currently not working")
+ } else
+ stop("response have to be in a class of SurvS4")
+
+ if (length( .imu )) init.mu = 0 * y[, 1] + .imu
+
+ predictors.names <-
+ namesof("mu", .link, earg = .earg, short = TRUE)
+
+ if (!length(etastart))
+ etastart = theta2eta(init.mu, link = .link, earg = .earg)
+ }), list( .link = link, .earg = earg, .imu = imu))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu = eta2theta(eta, link = .link, earg = .earg)
+ mu
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$expected = FALSE
+
+ misc$link = c("mu" = .link)
+
+ misc$earg = list("mu" = .earg)
+ misc$multipleResponses <- FALSE
+ }), list( .link = link, .earg = earg ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta,
+ extra = NULL) {
+ cen0 = extra$uncensored
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cenI = extra$intervalcensored
+ if (residuals){
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(w[cen0] * dpois(y[cen0, 1], mu[cen0], log = TRUE)) +
+ sum(w[cenU] * log1p(-ppois(y[cenU, 1] - 1, mu[cenU]))) +
+ sum(w[cenL] * ppois(y[cenL, 1] - 1, mu[cenL], log.p = TRUE)) +
+ sum(w[cenI] * log(ppois(y[cenI, 2], mu[cenI]) -
+ ppois(y[cenI, 1], mu[cenI])))
+ }
+ },
+ vfamily = "cenpoisson",
+ deriv = eval(substitute(expression({
+ cen0 = extra$uncensored
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cenI = extra$intervalcensored
+ lambda = eta2theta(eta, link = .link, earg = .earg)
+
+ dl.dlambda = (y[, 1] - lambda)/lambda # uncensored
+
+ yllim = yulim = y[, 1] # uncensored
+
+ if (any(cenU)) {
+ yllim[cenU] = y[cenU, 1]
+ densm1 = dpois(yllim-1, lambda)
+ queue = ppois(yllim-1, lambda, lower.tail = FALSE)
+ dl.dlambda[cenU] = densm1[cenU] / queue[cenU]
+ }
+ if (any(cenL)) {
+ yulim[cenL] = y[cenL, 1] - 1
+ densm0 = dpois(yulim, lambda)
+ Queue = ppois(yulim, lambda) # Left tail probability
+ dl.dlambda[cenL] = -densm0[cenL] / Queue[cenL]
+ }
+ if (any(cenI)) {
+ yllim[cenI] = y[cenI, 1] + 1
+ yulim[cenI] = y[cenI, 2]
+ Queue1 = ppois(yllim-1, lambda)
+ Queue2 = ppois(yulim, lambda)
+ densm02 = dpois(yulim, lambda)
+ densm12 = dpois(yllim-1, lambda)
+ dl.dlambda[cenI] =
+ (-densm02[cenI]+densm12[cenI]) / (Queue2[cenI]-Queue1[cenI])
+ }
+
+ dlambda.deta = dtheta.deta(theta=lambda, link = .link, earg = .earg)
+
+ c(w) * dl.dlambda * dlambda.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ d2lambda.deta2 = d2theta.deta2(theta = lambda,
+ link = .link, earg = .earg)
+ d2l.dlambda2 = 1 / lambda # uncensored; Fisher scoring
+
+ if (any(cenU)) {
+ densm2 = dpois(yllim-2, lambda)
+ d2l.dlambda2[cenU] = (dl.dlambda[cenU])^2 -
+ (densm2[cenU]-densm1[cenU])/queue[cenU]
+ }
+ if (any(cenL)) {
+ densm1 = dpois(yulim-1, lambda)
+ d2l.dlambda2[cenL] = (dl.dlambda[cenL])^2 -
+ (densm0[cenL]-densm1[cenL])/Queue[cenL]
+ }
+ if (any(cenI)) {
+ densm03 = dpois(yulim-1, lambda)
+ densm13 = dpois(yllim-2, lambda)
+ d2l.dlambda2[cenI] = (dl.dlambda[cenI])^2 -
+ (densm13[cenI]-densm12[cenI]-densm03[cenI] +
+ densm02[cenI]) / (Queue2[cenI]-Queue1[cenI])
+ }
+ wz = c(w) * ((dlambda.deta^2) * d2l.dlambda2)
+ wz
+ }), list( .link = link, .earg = earg ))))
}
if (FALSE)
- cexpon =
- ecexpon = function(link = "loge", location = 0)
+ cexpon <-
+ ecexpon <- function(link = "loge", location = 0)
{
- if (!is.Numeric(location, allowable.length = 1))
- stop("bad input for 'location'")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
-
- new("vglmff",
- blurb = c("Censored exponential distribution\n\n",
- "Link: ", namesof("rate", link, tag = TRUE), "\n",
- "Mean: ", "mu = ", location, " + 1 / ",
- namesof("rate", link, tag = FALSE), "\n",
- "Variance: ",
- if (location == 0) "Exponential: mu^2" else
- paste("(mu-", location, ")^2", sep = "")),
- initialize = eval(substitute(expression({
- extra$location = .location # This is passed into, e.g., link, deriv etc.
- if (any(y[,1] <= extra$location))
- stop("all responses must be greater than ", extra$location)
- predictors.names = namesof("rate", .link, tag = FALSE)
- type <- attr(y, "type")
- if (type == "right" || type == "left"){
- mu = y[,1] + (abs(y[,1] - extra$location) < 0.001) / 8
- }else
- if (type == "interval"){
- temp <- y[,3]
- mu = ifelse(temp == 3, y[,2] + (abs(y[,2] - extra$location)
- < 0.001)/8,
- y[,1] + (abs(y[,1] - extra$location) < 0.001) / 8)
- }
- if (!length(etastart))
- etastart = theta2eta(1/(mu-extra$location), .link)
-
- if (type == "right") {
- temp <- y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = rep(FALSE, len = n)
- extra$interval = rep(FALSE, len = n)
- } else
- if (type == "left") {
- temp <- y[, 2]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = rep(FALSE, len = n)
- extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
- } else
- if (type == "counting") {
- stop("type == 'counting' not recognized")
- extra$uncensored = rep(temp == 1, TRUE, FALSE)
- extra$interval = rep(FALSE, len = n)
- extra$leftcensored = rep(FALSE, len = n)
- extra$rightcensored = rep(FALSE, len = n)
- extra$counting = ifelse(temp == 0, TRUE, FALSE)
- } else
- if (type == "interval") {
- temp <- y[, 3]
- extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
- extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
- extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
- extra$interval = ifelse(temp == 3, TRUE, FALSE)
- } else
- stop("'type' not recognized")
- #if(!length(extra$leftcensored)) extra$leftcensored = rep(FALSE, len = n)
- #if(!length(extra$rightcensored)) extra$rightcensored = rep(FALSE, len = n)
- #if(any(extra$rightcensored & extra$leftcensored))
- # stop("some observations are both right and left censored!")
- }), list( .location=location, .link = link ))),
- linkinv = eval(substitute(function(eta, extra = NULL)
- extra$location + 1 / eta2theta(eta, .link),
- list( .link = link ) )),
- last = eval(substitute(expression({
- misc$location = extra$location
- misc$link = c("rate" = .link)
- }), list( .link = link ))),
- link=eval(substitute(function(mu, extra = NULL)
- theta2eta(1/(mu-extra$location), .link),
- list( .link = link ) )),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- rate = 1 / (mu - extra$location)
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$interval
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w[cenL] * log1p(-exp(-rate[cenL]*(y[cenL,1]-extra$location)))) +
- sum(w[cenU] * (-rate[cenU]*(y[cenU,1]-extra$location))) +
- sum(w[cen0] * (log(rate[cen0]) - rate[cen0]*(y[cen0,1]-extra$location)))+
- sum(w[cenI] * log(-exp(-rate[cenI]*(y[cenI,2]-extra$location))+
- exp(-rate[cenI]*(y[cenI,1]-extra$location))))
- }, list( .link = link ))),
- vfamily = c("ecexpon"),
- deriv = eval(substitute(expression({
- rate = 1 / (mu - extra$location)
- cen0 = extra$uncensored
- cenL = extra$leftcensored
- cenU = extra$rightcensored
- cenI = extra$interval
- dl.drate = 1/rate - (y[,1]-extra$location) # uncensored
- tmp200 = exp(-rate*(y[,1]-extra$location))
- tmp200b = exp(-rate*(y[,2]-extra$location)) # for interval censored
- if (any(cenL))
- dl.drate[cenL] = (y[cenL,1]-extra$location) *
- tmp200[cenL] / (1 - tmp200[cenL])
- if (any(cenU))
- dl.drate[cenU] = -(y[cenU,1]-extra$location)
- if (any(cenI))
- dl.drate[cenI] = ((y[cenI,2]-extra$location)*tmp200b[cenI]-
- (y[cenI,1]-extra$location)*tmp200[cenI])/
- (-tmp200b[cenI]+tmp200[cenI])
- drate.deta = dtheta.deta(rate, .link)
- w * dl.drate * drate.deta
- }), list( .link = link ) )),
- weight = eval(substitute(expression({
- A123 = ((mu-extra$location)^2) # uncensored d2l.drate2
- Lowpt = ifelse(cenL, y[,1], extra$location)
- Lowpt = ifelse(cenI, y[,1], Lowpt) #interval censored
- Upppt = ifelse(cenU, y[,1], Inf)
- Upppt = ifelse(cenI, y[,2], Upppt) #interval censored
- tmp300 = exp(-rate*(Lowpt - extra$location))
- d2l.drate2 = 0 * y[,1]
- ind50 = Lowpt > extra$location
- d2l.drate2[ind50] = (Lowpt[ind50]-extra$location)^2 *
- tmp300[ind50] / (1-tmp300[ind50])
- d2l.drate2 = d2l.drate2 + (exp(-rate*(Lowpt-extra$location)) -
- exp(-rate*(Upppt-extra$location))) * A123
- wz = w * (drate.deta^2) * d2l.drate2
- wz
+ if (!is.Numeric(location, allowable.length = 1))
+ stop("bad input for 'location'")
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Censored exponential distribution\n\n",
+ "Link: ", namesof("rate", link, tag = TRUE), "\n",
+ "Mean: ", "mu = ", location, " + 1 / ",
+ namesof("rate", link, tag = FALSE), "\n",
+ "Variance: ",
+ if (location == 0) "Exponential: mu^2" else
+ paste("(mu-", location, ")^2", sep = "")),
+ initialize = eval(substitute(expression({
+ extra$location = .location
+
+ if (any(y[, 1] <= extra$location))
+ stop("all responses must be greater than ", extra$location)
+
+ predictors.names <- namesof("rate", .link , .earg , tag = FALSE)
+
+ type <- attr(y, "type")
+ if (type == "right" || type == "left"){
+ mu = y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8
+ }else
+ if (type == "interval"){
+ temp <- y[, 3]
+ mu = ifelse(temp == 3, y[, 2] + (abs(y[, 2] - extra$location)
+ < 0.001)/8,
+ y[, 1] + (abs(y[, 1] - extra$location) < 0.001) / 8)
+ }
+ if (!length(etastart))
+ etastart = theta2eta(1/(mu-extra$location), .link , .earg )
+
+ if (type == "right") {
+ temp <- y[, 2]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored = rep(FALSE, len = n)
+ extra$interval = rep(FALSE, len = n)
+ } else
+ if (type == "left") {
+ temp <- y[, 2]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = rep(FALSE, len = n)
+ extra$leftcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$interval = rep(FALSE, len = n)
+ } else
+ if (type == "counting") {
+ stop("type == 'counting' not recognized")
+ extra$uncensored = rep(temp == 1, TRUE, FALSE)
+ extra$interval = rep(FALSE, len = n)
+ extra$leftcensored = rep(FALSE, len = n)
+ extra$rightcensored = rep(FALSE, len = n)
+ extra$counting = ifelse(temp == 0, TRUE, FALSE)
+ } else
+ if (type == "interval") {
+ temp <- y[, 3]
+ extra$uncensored = ifelse(temp == 1, TRUE, FALSE)
+ extra$rightcensored = ifelse(temp == 0, TRUE, FALSE)
+ extra$leftcensored = ifelse(temp == 2, TRUE, FALSE)
+ extra$interval = ifelse(temp == 3, TRUE, FALSE)
+ } else
+ stop("'type' not recognized")
+ }), list( .location=location, .link = link ))),
+ linkinv = eval(substitute(function(eta, extra = NULL)
+ extra$location + 1 / eta2theta(eta, .link , .earg ),
+ list( .link = link ) )),
+ last = eval(substitute(expression({
+ misc$location = extra$location
+ misc$link = c("rate" = .link)
+ misc$multipleResponses <- FALSE
+ }), list( .link = link ))),
+ link = eval(substitute(function(mu, extra = NULL)
+ theta2eta(1/(mu-extra$location), .link , .earg ),
+ list( .link = link ) )),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ rate = 1 / (mu - extra$location)
+ cen0 = extra$uncensored
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cenI = extra$interval
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w[cenL] * log1p(-exp(-rate[cenL]*(y[cenL, 1]-extra$location)))) +
+ sum(w[cenU] * (-rate[cenU]*(y[cenU, 1]-extra$location))) +
+ sum(w[cen0] * (log(rate[cen0]) -
+ rate[cen0]*(y[cen0, 1]-extra$location))) +
+ sum(w[cenI] * log(-exp(-rate[cenI]*(y[cenI, 2]-extra$location))+
+ exp(-rate[cenI]*(y[cenI, 1]-extra$location))))
+ }, list( .link = link ))),
+ vfamily = c("ecexpon"),
+ deriv = eval(substitute(expression({
+ rate = 1 / (mu - extra$location)
+ cen0 = extra$uncensored
+ cenL = extra$leftcensored
+ cenU = extra$rightcensored
+ cenI = extra$interval
+ dl.drate = 1/rate - (y[, 1]-extra$location) # uncensored
+ tmp200 = exp(-rate*(y[, 1]-extra$location))
+ tmp200b = exp(-rate*(y[, 2]-extra$location)) # for interval censored
+ if (any(cenL))
+ dl.drate[cenL] = (y[cenL, 1]-extra$location) *
+ tmp200[cenL] / (1 - tmp200[cenL])
+ if (any(cenU))
+ dl.drate[cenU] = -(y[cenU, 1]-extra$location)
+ if (any(cenI))
+ dl.drate[cenI] = ((y[cenI, 2]-extra$location)*tmp200b[cenI]-
+ (y[cenI, 1]-extra$location)*tmp200[cenI])/
+ (-tmp200b[cenI]+tmp200[cenI])
+
+ drate.deta = dtheta.deta(rate, .link , .earg )
+
+ c(w) * dl.drate * drate.deta
+ }), list( .link = link ) )),
+ weight = eval(substitute(expression({
+ A123 = ((mu-extra$location)^2) # uncensored d2l.drate2
+ Lowpt = ifelse(cenL, y[, 1], extra$location)
+ Lowpt = ifelse(cenI, y[, 1], Lowpt) #interval censored
+ Upppt = ifelse(cenU, y[, 1], Inf)
+ Upppt = ifelse(cenI, y[, 2], Upppt) #interval censored
+ tmp300 = exp(-rate*(Lowpt - extra$location))
+
+ d2l.drate2 = 0 * y[, 1]
+ ind50 = Lowpt > extra$location
+
+ d2l.drate2[ind50] = (Lowpt[ind50]-extra$location)^2 *
+ tmp300[ind50] / (1-tmp300[ind50])
+ d2l.drate2 = d2l.drate2 + (exp(-rate*(Lowpt-extra$location)) -
+ exp(-rate*(Upppt-extra$location))) * A123
+
+ wz = c(w) * (drate.deta^2) * d2l.drate2
+ wz
}), list( .link = link ))))
}
@@ -301,23 +330,25 @@ if (FALSE)
- cennormal1 = function(lmu = "identity", lsd = "loge",
- emu = list(), esd = list(),
- imethod = 1,
- zero = 2)
+ cennormal1 <- function(lmu = "identity", lsd = "loge",
+ imethod = 1, zero = 2)
{
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (!is.list(emu)) emu = list()
- if (!is.list(esd)) esd = list()
new("vglmff",
@@ -326,12 +357,18 @@ if (FALSE)
namesof("sd", lsd, tag = TRUE), "\n",
"Conditional variance: sd^2"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero=zero ))),
initialize = eval(substitute(expression({
- y = cbind(y)
- if (ncol(y) > 1)
- stop("the response must be a vector or a 1-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
if (!length(extra$leftcensored))
extra$leftcensored = rep(FALSE, len = n)
@@ -340,7 +377,7 @@ if (FALSE)
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
- predictors.names =
+ predictors.names <-
c(namesof("mu", .lmu, earg =.emu, tag = FALSE),
namesof("sd", .lsd, earg =.esd, tag = FALSE))
@@ -351,7 +388,8 @@ if (FALSE)
sd.y.est = sqrt( sum(w[!i11] * junk$resid^2) / junk$df.residual )
etastart = cbind(mu = y,
rep(theta2eta(sd.y.est, .lsd), length = n))
- if (any(anyc)) etastart[anyc,1] = x[anyc,,drop = FALSE] %*% junk$coeff
+ if (any(anyc))
+ etastart[anyc, 1] = x[anyc,,drop = FALSE] %*% junk$coeff
}
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd,
@@ -361,9 +399,11 @@ if (FALSE)
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
misc$link = c("mu" = .lmu, "sd" = .lsd)
+
misc$earg = list("mu" = .emu ,"sd" = .esd )
misc$expected = TRUE
+ misc$multipleResponses <- FALSE
}), list( .lmu = lmu, .lsd = lsd,
.emu = emu, .esd = esd ))),
loglikelihood = eval(substitute(
@@ -372,8 +412,8 @@ if (FALSE)
cenU = extra$rightcensored
cen0 = !cenL & !cenU # uncensored obsns
- mum = eta2theta(eta[,1], .lmu, earg = .emu )
- sdv = eta2theta(eta[,2], .lsd, earg = .esd )
+ mum = eta2theta(eta[, 1], .lmu, earg = .emu )
+ sdv = eta2theta(eta[, 2], .lsd, earg = .esd )
Lower = ifelse(cenL, y, -Inf)
Upper = ifelse(cenU, y, Inf)
@@ -393,8 +433,8 @@ if (FALSE)
Lower = ifelse(cenL, y, -Inf)
Upper = ifelse(cenU, y, Inf)
- mum = eta2theta(eta[,1], .lmu)
- sdv = eta2theta(eta[,2], .lsd)
+ mum = eta2theta(eta[, 1], .lmu)
+ sdv = eta2theta(eta[, 2], .lsd)
dl.dmu = (y-mum) / sdv^2
dl.dsd = (((y-mum)/sdv)^2 - 1) / sdv
@@ -431,8 +471,8 @@ if (FALSE)
A3 = 1 - pnorm(( Upper - mum) / sdv) # Upper
A2 = 1 - A1 - A3 # Middle; uncensored
wz = matrix(0, n, 3)
- wz[,iam(1,1,M)] = A2 * 1 / sdv^2 # ed2l.dmu2
- wz[,iam(2,2,M)] = A2 * 2 / sdv^2 # ed2l.dsd2
+ wz[,iam(1, 1,M)] = A2 * 1 / sdv^2 # ed2l.dmu2
+ wz[,iam(2, 2,M)] = A2 * 2 / sdv^2 # ed2l.dsd2
mumL = mum - Lower
temp21L = mumL / sdv
PhiL = pnorm(temp21L)
@@ -446,9 +486,9 @@ if (FALSE)
wz.cenL11[!is.finite(wz.cenL11)] = 0
wz.cenL22[!is.finite(wz.cenL22)] = 0
wz.cenL12[!is.finite(wz.cenL12)] = 0
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A1 * wz.cenL11
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A1 * wz.cenL22
- wz[,iam(1,2,M)] = A1 * wz.cenL12
+ wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] + A1 * wz.cenL11
+ wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] + A1 * wz.cenL22
+ wz[,iam(1, 2,M)] = A1 * wz.cenL12
mumU = Upper - mum # often Inf
temp21U = mumU / sdv # often Inf
PhiU = pnorm(temp21U) # often 1
@@ -463,12 +503,12 @@ if (FALSE)
wzcenU11[!is.finite(wzcenU11)] = 0 # Needed when Upper==Inf
wzcenU22[!is.finite(wzcenU22)] = 0 # Needed when Upper==Inf
wzcenU12[!is.finite(wzcenU12)] = 0 # Needed when Upper==Inf
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + A3 * wzcenU11
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] + A3 * wzcenU22
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + A3 * wzcenU12
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dmu.deta^2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] * dsd.deta^2
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] * dmu.deta * dsd.deta
+ wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] + A3 * wzcenU11
+ wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] + A3 * wzcenU22
+ wz[,iam(1, 2,M)] = wz[,iam(1, 2,M)] + A3 * wzcenU12
+ wz[,iam(1, 1,M)] = wz[,iam(1, 1,M)] * dmu.deta^2
+ wz[,iam(2, 2,M)] = wz[,iam(2, 2,M)] * dsd.deta^2
+ wz[,iam(1, 2,M)] = wz[,iam(1, 2,M)] * dmu.deta * dsd.deta
c(w) * wz
}), list( .lmu = lmu, .lsd = lsd ))))
}
@@ -476,81 +516,99 @@ if (FALSE)
- cenrayleigh = function(lscale = "loge", escale = list(),
- oim = TRUE) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.logical(oim) || length(oim) != 1)
- stop("bad input for argument 'oim'")
- if (!is.list(escale)) escale = list()
-
- new("vglmff",
- blurb = c("Censored Rayleigh distribution\n\n",
- "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n",
- "Link: ",
- namesof("scale", lscale, earg = escale ), "\n", "\n",
- "Mean: scale * sqrt(pi / 2)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
-
- if (length(extra$leftcensored))
- stop("cannot handle left-censored data")
- if (!length(extra$rightcensored))
- extra$rightcensored = rep(FALSE, len = n)
-
- predictors.names =
- namesof("scale", .lscale, earg = .escale, tag = FALSE)
- if (!length(etastart)) {
- a.init = (y+1/8) / sqrt(pi/2)
- etastart = theta2eta(a.init, .lscale, earg = .escale )
- }
- }), list( .lscale = lscale, .escale = escale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale )
- Scale * sqrt(pi/2)
- }, list( .lscale = lscale, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c("scale" = .lscale)
- misc$earg = list("scale" = .escale)
- misc$oim = .oim
- }), list( .lscale = lscale, .escale = escale,
- .oim = oim ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale )
- cen0 = !extra$rightcensored # uncensored obsns
- cenU = extra$rightcensored
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w[cen0] * (log(y[cen0]) - 2*log(Scale[cen0]) -
- 0.5*(y[cen0]/Scale[cen0])^2)) -
- sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
- }, list( .lscale = lscale, .escale = escale ))),
- vfamily = c("cenrayleigh"),
- deriv = eval(substitute(expression({
- cen0 = !extra$rightcensored # uncensored obsns
- cenU = extra$rightcensored
- Scale = eta2theta(eta, .lscale, earg = .escale )
- dl.dScale = ((y/Scale)^2 - 2) / Scale
- dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale )
- dl.dScale[cenU] = y[cenU]^2 / Scale[cenU]^3
- w * dl.dScale * dScale.deta
- }), list( .lscale = lscale, .escale = escale ))),
- weight = eval(substitute(expression({
- ed2l.dScale2 = 4 / Scale^2
- wz = dScale.deta^2 * ed2l.dScale2
- if ( .oim ) {
- d2l.dScale2 = 3 * (y[cenU])^2 / (Scale[cenU])^4
- d2Scale.deta2 = d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
- wz[cenU] = (dScale.deta[cenU])^2 * d2l.dScale2 - dl.dScale[cenU] * d2Scale.deta2
- } else {
- ed2l.dScale2[cenU] = 6 / (Scale[cenU])^2
- wz[cenU] = (dScale.deta[cenU])^2 * ed2l.dScale2[cenU]
- }
- c(w) * wz
- }), list( .lscale = lscale, .escale = escale,
- .oim = oim ))))
+ cenrayleigh <- function(lscale = "loge",
+ oim = TRUE) {
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ if (!is.logical(oim) || length(oim) != 1)
+ stop("bad input for argument 'oim'")
+
+ new("vglmff",
+ blurb = c("Censored Rayleigh distribution\n\n",
+ "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n",
+ "Link: ",
+ namesof("scale", lscale, earg = escale ), "\n", "\n",
+ "Mean: scale * sqrt(pi / 2)"),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ if (length(extra$leftcensored))
+ stop("cannot handle left-censored data")
+
+ if (!length(extra$rightcensored))
+ extra$rightcensored = rep(FALSE, len = n)
+
+ predictors.names <-
+ namesof("scale", .lscale, earg = .escale, tag = FALSE)
+
+ if (!length(etastart)) {
+ a.init = (y+1/8) / sqrt(pi/2)
+ etastart = theta2eta(a.init, .lscale, earg = .escale )
+ }
+ }), list( .lscale = lscale, .escale = escale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale )
+ Scale * sqrt(pi/2)
+ }, list( .lscale = lscale, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link = c("scale" = .lscale)
+ misc$earg = list("scale" = .escale)
+
+ misc$oim = .oim
+ }), list( .lscale = lscale, .escale = escale,
+ .oim = oim ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale, earg = .escale )
+
+ cen0 = !extra$rightcensored # uncensored obsns
+ cenU = extra$rightcensored
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w[cen0] * (log(y[cen0]) - 2*log(Scale[cen0]) -
+ 0.5*(y[cen0]/Scale[cen0])^2)) -
+ sum(w[cenU] * (y[cenU]/Scale[cenU])^2) * 0.5
+ }, list( .lscale = lscale,
+ .escale = escale ))),
+ vfamily = c("cenrayleigh"),
+ deriv = eval(substitute(expression({
+ cen0 = !extra$rightcensored # uncensored obsns
+ cenU = extra$rightcensored
+
+ Scale = eta2theta(eta, .lscale, earg = .escale )
+
+ dl.dScale = ((y/Scale)^2 - 2) / Scale
+
+ dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale )
+ dl.dScale[cenU] = y[cenU]^2 / Scale[cenU]^3
+
+ c(w) * dl.dScale * dScale.deta
+ }), list( .lscale = lscale,
+ .escale = escale ))),
+ weight = eval(substitute(expression({
+ ned2l.dScale2 = 4 / Scale^2
+ wz = dScale.deta^2 * ned2l.dScale2
+
+ if ( .oim ) {
+ d2l.dScale2 = 3 * (y[cenU])^2 / (Scale[cenU])^4
+ d2Scale.deta2 = d2theta.deta2(Scale[cenU], .lscale, earg = .escale )
+ wz[cenU] = (dScale.deta[cenU])^2 * d2l.dScale2 -
+ dl.dScale[cenU] * d2Scale.deta2
+ } else {
+ ned2l.dScale2[cenU] = 6 / (Scale[cenU])^2
+ wz[cenU] = (dScale.deta[cenU])^2 * ned2l.dScale2[cenU]
+ }
+
+ c(w) * wz
+ }), list( .lscale = lscale, .escale = escale,
+ .oim = oim ))))
}
@@ -560,29 +618,50 @@ if (FALSE)
- weibull = function(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale = NULL,
- nrfs = 1,
- imethod = 1, zero = 2)
+ weibull <-
+ function(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ nrfs = 1,
+ probs.y = c(0.2, 0.5, 0.8),
+ imethod = 1, zero = -2)
{
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ stop("argument 'imethod' must be 1 or 2")
+ if (!is.Numeric(probs.y, positive = TRUE) ||
+ length(probs.y) < 2 ||
+ max(probs.y) >= 1)
+ stop("bad input for argument 'probs.y'")
+
+
+ if (!is.Numeric(nrfs, allowable.length = 1) ||
+ nrfs < 0 ||
+ nrfs > 1)
+ stop("bad input for argument 'nrfs'")
+
+ if (length(ishape))
+ if (!is.Numeric(ishape, positive = TRUE))
+ stop("argument 'ishape' values must be positive")
+ if (length(iscale))
+ if (!is.Numeric(iscale, positive = TRUE))
+ stop("argument 'iscale' values must be positive")
- if (!is.list(eshape)) eshape = list()
- if (!is.list(escale)) escale = list()
- if (!is.Numeric(nrfs, allowable.length = 1) || nrfs < 0 || nrfs > 1)
- stop("bad input for argument 'nrfs'")
new("vglmff",
blurb = c("Weibull distribution\n\n",
@@ -592,99 +671,190 @@ if (FALSE)
"Mean: scale * gamma(1 + 1/shape)\n",
"Variance: scale^2 * (gamma(1 + 2/shape) - ",
"gamma(1 + 1/shape)^2)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
initialize = eval(substitute(expression({
- y = cbind(y)
- if (ncol(y) > 1)
- stop("the response must be a vector or a 1-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
if (is.SurvS4(y))
- stop("only uncensored observations are allowed; don't use SurvS4()")
-
- predictors.names =
- c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
-
- if (!length(.ishape) || !length(.iscale)) {
- anyc = FALSE # extra$leftcensored | extra$rightcensored
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.shape = if (length( .ishape)) .ishape else 1
- xvec = log(-log1p(-qvec))
- fit0 = lsfit(x = xvec, y = log(quantile(y[!i11], qvec)))
- }
+ stop("only uncensored observations are allowed; ",
+ "don't use SurvS4()")
+
+
+ mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lshape , earg = .eshape , tag = FALSE),
+ namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+
+ Shape.init <- matrix(if(length( .ishape )) .ishape else 0 + NA,
+ n, ncoly, byrow = TRUE)
+ Scale.init <- matrix(if(length( .iscale )) .iscale else 0 + NA,
+ n, ncoly, byrow = TRUE)
if (!length(etastart)) {
- shape = rep(if(length(.ishape)) .ishape else
- 1 / fit0$coef["X"], len = n)
- Scale = rep(if(length(.iscale)) .iscale else
- exp(fit0$coef["Intercept"]), len = n)
- etastart = cbind(theta2eta(shape, .lshape, earg = .eshape ),
- theta2eta(Scale, .lscale, earg = .escale ))
+ if (!length( .ishape ) ||
+ !length( .iscale )) {
+ for (ilocal in 1:ncoly) {
+
+ anyc <- FALSE # extra$leftcensored | extra$rightcensored
+ i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
+ probs.y <- .probs.y
+ xvec <- log(-log1p(-probs.y))
+ fit0 <- lsfit(x = xvec, y = log(quantile(y[!i11, ilocal],
+ probs = probs.y )))
+
+
+ if (!is.Numeric(Shape.init[, ilocal]))
+ Shape.init[, ilocal] <- 1 / fit0$coef["X"]
+ if (!is.Numeric(Scale.init[, ilocal]))
+ Scale.init[, ilocal] <- exp(fit0$coef["Intercept"])
+ } # ilocal
+
+ etastart <-
+ cbind(theta2eta(Shape.init, .lshape , earg = .eshape ),
+ theta2eta(Scale.init, .lscale , earg = .escale ))[,
+ interleave.VGAM(M, M = Musual)]
+ }
}
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.iscale = iscale, .ishape = ishape,
+ .probs.y = probs.y,
.imethod = imethod ) )),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape )
- Scale = eta2theta(eta[,2], .lscale, earg = .escale )
- Scale * gamma(1 + 1 / shape)
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+ Scale * gamma(1 + 1 / Shape)
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ) )),
last = eval(substitute(expression({
- if (regnotok <- any(shape <= 2))
+ regnotok <- any(Shape <= 2)
+ if (any(Shape <= 1)) {
+ warning("MLE regularity conditions are violated",
+ "(shape <= 1) at the final iteration: ",
+ "MLEs are not consistent")
+ } else if (any(1 < Shape & Shape < 2)) {
warning("MLE regularity conditions are violated",
- "(shape <= 2) at the final iteration")
+ "(1 < shape < 2) at the final iteration: ",
+ "MLEs exist but are not asymptotically normal")
+ } else if (any(2 == Shape)) {
+ warning("MLE regularity conditions are violated",
+ "(shape == 2) at the final iteration: ",
+ "MLEs exist and are normal and asymptotically ",
+ "efficient but with a slower convergence rate than when ",
+ "shape > 2")
+ }
+
+
+
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lshape , length = ncoly),
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .eshape
+ misc$earg[[Musual*ii ]] <- .escale
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
- misc$link = c(shape = .lshape, scale = .lscale)
- misc$earg = list(shape = .eshape, scale = .escale)
- misc$nrfs = .nrfs
- misc$RegCondOK = !regnotok # Save this for later
+ misc$nrfs <- .nrfs
+ misc$RegCondOK <- !regnotok # Save this for later
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape, .nrfs = nrfs ) )),
+ .escale = escale, .eshape = eshape,
+ .imethod = imethod,
+ .nrfs = nrfs ) )),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- shape = eta2theta(eta[,1], .lshape, earg = .eshape )
- Scale = eta2theta(eta[,2], .lscale, earg = .escale )
- ell1 = (log(shape) - log(Scale) + (shape-1) *
- log(y / Scale) - (y / Scale)^shape)
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
+
if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * ell1)
+ "implemented yet") else {
+ sum(c(w) * dweibull(x = y, shape = Shape, scale = Scale, log = TRUE))
+ }
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ) )),
vfamily = c("weibull"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[,1], .lshape, earg = .eshape )
- Scale = eta2theta(eta[,2], .lscale, earg = .escale )
+ Musual <- 2
+ Shape <- eta2theta(eta[, c(TRUE, FALSE)], .lshape , earg = .eshape )
+ Scale <- eta2theta(eta[, c(FALSE, TRUE)], .lscale , earg = .escale )
- dl.dshape = 1 / shape + log(y / Scale) -
- log(y / Scale) * (y / Scale)^shape
- dl.dscale = (shape / Scale) * (-1.0 + (y / Scale)^shape)
+ dl.dshape <- 1 / Shape + log(y / Scale) -
+ log(y / Scale) * (y / Scale)^Shape
+ dl.dscale <- (Shape / Scale) * (-1.0 + (y / Scale)^Shape)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale )
- c(w) * cbind(dl.dshape * dshape.deta,
- dl.dscale * dscale.deta)
+ dshape.deta <- dtheta.deta(Shape, .lshape, earg = .eshape )
+ dscale.deta <- dtheta.deta(Scale, .lscale, earg = .escale )
+
+ myderiv <- c(w) * cbind(dl.dshape, dl.dscale) *
+ cbind(dshape.deta, dscale.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ) )),
weight = eval(substitute(expression({
- EulerM = -digamma(1.0)
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ EulerM <- -digamma(1.0)
- ed2l.dshape = (6*(EulerM - 1)^2 + pi^2)/(6*shape^2) # KK (2003)
- ed2l.dscale = (shape / Scale)^2
- ed2l.dshapescale = (EulerM-1) / Scale
- wz[,iam(1,1,M)] = ed2l.dshape * dshape.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dshapescale * dscale.deta * dshape.deta
+ ned2l.dshape <- (6*(EulerM - 1)^2 + pi^2)/(6*Shape^2) # KK (2003)
+ ned2l.dscale <- (Shape / Scale)^2
+ ned2l.dshapescale <- (EulerM-1) / Scale
- c(w) * wz
+ wz <- matrix(0.0, n, M + M - 1) # wz is tridiagonal
+
+ ind11 <- ind22 <- ind12 <- NULL
+ for (ii in 1:(M / Musual)) {
+ ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
+ ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
+ ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
+ }
+ wz[, ind11] <- ned2l.dshape * dshape.deta^2
+ wz[, ind22] <- ned2l.dscale * dscale.deta^2
+ wz[, ind12] <- ned2l.dshapescale * dscale.deta * dshape.deta
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .eshape = eshape, .nrfs = nrfs ))))
}
@@ -705,79 +875,79 @@ function (time, time2, event, type = c("right", "left", "interval",
nn <- length(time)
ng <- nargs()
if (missing(type)) {
- if (ng == 1 || ng == 2)
- type <- "right" else if (ng == 3)
- type <- "counting" else stop("Invalid number of arguments")
+ if (ng == 1 || ng == 2)
+ type <- "right" else if (ng == 3)
+ type <- "counting" else stop("Invalid number of arguments")
} else {
- type <- match.arg(type)
- ng <- ng - 1
- if (ng != 3 && (type == "interval" || type == "counting"))
- stop("Wrong number of args for this type of survival data")
- if (ng != 2 && (type == "right" || type == "left" ||
- type == "interval2"))
- stop("Wrong number of args for this type of survival data")
+ type <- match.arg(type)
+ ng <- ng - 1
+ if (ng != 3 && (type == "interval" || type == "counting"))
+ stop("Wrong number of args for this type of survival data")
+ if (ng != 2 && (type == "right" || type == "left" ||
+ type == "interval2"))
+ stop("Wrong number of args for this type of survival data")
}
who <- !is.na(time)
if (ng == 1) {
- if (!is.numeric(time))
- stop("Time variable is not numeric")
- ss <- cbind(time, 1)
- dimnames(ss) <- list(NULL, c("time", "status"))
+ if (!is.numeric(time))
+ stop("Time variable is not numeric")
+ ss <- cbind(time, 1)
+ dimnames(ss) <- list(NULL, c("time", "status"))
} else if (type == "right" || type == "left") {
- if (!is.numeric(time))
- stop("Time variable is not numeric")
- if (length(time2) != nn)
- stop("Time and status are different lengths")
- if (is.logical(time2))
- status <- 1 * time2 else if (is.numeric(time2)) {
- who2 <- !is.na(time2)
- if (max(time2[who2]) == 2)
- status <- time2 - 1 else status <- time2
- if (any(status[who2] != 0 & status[who2] != 1))
- stop("Invalid status value")
- } else stop("Invalid status value")
- ss <- cbind(time, status)
- dimnames(ss) <- list(NULL, c("time", "status"))
+ if (!is.numeric(time))
+ stop("Time variable is not numeric")
+ if (length(time2) != nn)
+ stop("Time and status are different lengths")
+ if (is.logical(time2))
+ status <- 1 * time2 else if (is.numeric(time2)) {
+ who2 <- !is.na(time2)
+ if (max(time2[who2]) == 2)
+ status <- time2 - 1 else status <- time2
+ if (any(status[who2] != 0 & status[who2] != 1))
+ stop("Invalid status value")
+ } else stop("Invalid status value")
+ ss <- cbind(time, status)
+ dimnames(ss) <- list(NULL, c("time", "status"))
} else if (type == "counting") {
- if (length(time2) != nn)
- stop("Start and stop are different lengths")
- if (length(event) != nn)
- stop("Start and event are different lengths")
- if (!is.numeric(time))
- stop("Start time is not numeric")
- if (!is.numeric(time2))
- stop("Stop time is not numeric")
- who3 <- who & !is.na(time2)
- if (any(time[who3] >= time2[who3]))
- stop("Stop time must be > start time")
- if (is.logical(event))
- status <- 1 * event else if (is.numeric(event)) {
- who2 <- !is.na(event)
- if (max(event[who2]) == 2)
- status <- event - 1 else status <- event
- if (any(status[who2] != 0 & status[who2] != 1))
- stop("Invalid status value")
- } else stop("Invalid status value")
- ss <- cbind(time - origin, time2 - origin, status)
+ if (length(time2) != nn)
+ stop("Start and stop are different lengths")
+ if (length(event) != nn)
+ stop("Start and event are different lengths")
+ if (!is.numeric(time))
+ stop("Start time is not numeric")
+ if (!is.numeric(time2))
+ stop("Stop time is not numeric")
+ who3 <- who & !is.na(time2)
+ if (any(time[who3] >= time2[who3]))
+ stop("Stop time must be > start time")
+ if (is.logical(event))
+ status <- 1 * event else if (is.numeric(event)) {
+ who2 <- !is.na(event)
+ if (max(event[who2]) == 2)
+ status <- event - 1 else status <- event
+ if (any(status[who2] != 0 & status[who2] != 1))
+ stop("Invalid status value")
+ } else stop("Invalid status value")
+ ss <- cbind(time - origin, time2 - origin, status)
} else {
- if (type == "interval2") {
- event <- ifelse(is.na(time), 2, ifelse(is.na(time2),
- 0, ifelse(time == time2, 1, 3)))
- if (any(time[event == 3] > time2[event == 3]))
- stop("Invalid interval: start > stop")
- time <- ifelse(event != 2, time, time2)
- type <- "interval"
- } else {
- temp <- event[!is.na(event)]
- if (!is.numeric(temp))
- stop("Status indicator must be numeric")
- if (length(temp) > 0 && any(temp != floor(temp) |
- temp < 0 | temp > 3))
- stop("Status indicator must be 0, 1, 2 or 3")
- }
- status <- event
- ss <- cbind(time, ifelse(!is.na(event) & event == 3,
- time2, 1), status)
+ if (type == "interval2") {
+ event <- ifelse(is.na(time), 2, ifelse(is.na(time2),
+ 0, ifelse(time == time2, 1, 3)))
+ if (any(time[event == 3] > time2[event == 3]))
+ stop("Invalid interval: start > stop")
+ time <- ifelse(event != 2, time, time2)
+ type <- "interval"
+ } else {
+ temp <- event[!is.na(event)]
+ if (!is.numeric(temp))
+ stop("Status indicator must be numeric")
+ if (length(temp) > 0 && any(temp != floor(temp) |
+ temp < 0 | temp > 3))
+ stop("Status indicator must be 0, 1, 2 or 3")
+ }
+ status <- event
+ ss <- cbind(time, ifelse(!is.na(event) & event == 3,
+ time2, 1), status)
}
attr(ss, "type") <- type
class(ss) <- "SurvS4"
@@ -800,28 +970,28 @@ setIs(class1 = "SurvS4", class2 = "matrix") # Forces vglm()@y to be a matrix
as.character.SurvS4 <-
function (x, ...)
{
- class(x) <- NULL
- type <- attr(x, "type")
-
- if (type == "right") {
- temp <- x[, 2]
- temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "+", " "))
- paste(format(x[, 1]), temp, sep = "")
- } else if (type == "counting") {
- temp <- x[, 3]
- temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "+", " "))
- paste("(", format(x[, 1]), ",", format(x[, 2]), temp, "]", sep = "")
- } else if (type == "left") {
- temp <- x[, 2]
- temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "<", " "))
- paste(temp, format(x[, 1]), sep = "")
- } else {
- stat <- x[, 3]
- temp <- c("+", "", "-", "]")[stat + 1]
- temp2 <- ifelse(stat == 3, paste("(", format(x[, 1]),
- ", ", format(x[, 2]), sep = ""), format(x[, 1]))
- ifelse(is.na(stat), as.character(NA), paste(temp2, temp, sep = ""))
- }
+ class(x) <- NULL
+ type <- attr(x, "type")
+
+ if (type == "right") {
+ temp <- x[, 2]
+ temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "+", " "))
+ paste(format(x[, 1]), temp, sep = "")
+ } else if (type == "counting") {
+ temp <- x[, 3]
+ temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "+", " "))
+ paste("(", format(x[, 1]), ",", format(x[, 2]), temp, "]", sep = "")
+ } else if (type == "left") {
+ temp <- x[, 2]
+ temp <- ifelse(is.na(temp), "?", ifelse(temp == 0, "<", " "))
+ paste(temp, format(x[, 1]), sep = "")
+ } else {
+ stat <- x[, 3]
+ temp <- c("+", "", "-", "]")[stat + 1]
+ temp2 <- ifelse(stat == 3, paste("(", format(x[, 1]),
+ ", ", format(x[, 2]), sep = ""), format(x[, 1]))
+ ifelse(is.na(stat), as.character(NA), paste(temp2, temp, sep = ""))
+ }
}
@@ -842,8 +1012,9 @@ function (x, ...)
}
}
+
is.na.SurvS4 <- function(x) {
- as.vector( (1* is.na(unclass(x)))%*% rep(1, ncol(x)) >0)
+ as.vector( (1* is.na(unclass(x)))%*% rep(1, ncol(x)) >0)
}
@@ -853,7 +1024,7 @@ is.na.SurvS4 <- function(x) {
show.SurvS4 <- function (object)
- print(as.character.SurvS4(object), quote = FALSE)
+ print.default(as.character.SurvS4(object), quote = FALSE)
diff --git a/R/family.circular.R b/R/family.circular.R
index d1b9882..0d985fd 100644
--- a/R/family.circular.R
+++ b/R/family.circular.R
@@ -10,77 +10,81 @@
-dcard = function(x, mu, rho, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
-
- L = max(length(x), length(mu), length(rho))
- x = rep(x, len=L); mu = rep(mu, len=L); rho = rep(rho, len=L);
- logdensity = rep(log(0), len=L)
- xok = (x > 0) & (x < (2*pi))
- logdensity[xok] = -log(2*pi) + log1p(2 * rho[xok] * cos(x[xok]-mu[xok]))
- logdensity[mu <= 0] = NaN
- logdensity[mu >= 2*pi] = NaN
- logdensity[rho <= -0.5] = NaN
- logdensity[rho >= 0.5] = NaN
- if (log.arg) logdensity else exp(logdensity)
+dcard <- function(x, mu, rho, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+ L = max(length(x), length(mu), length(rho))
+ x = rep(x, len = L); mu = rep(mu, len = L); rho = rep(rho, len = L);
+ logdensity = rep(log(0), len = L)
+ xok = (x > 0) & (x < (2*pi))
+ logdensity[xok] = -log(2*pi) + log1p(2 * rho[xok] * cos(x[xok]-mu[xok]))
+ logdensity[mu <= 0] = NaN
+ logdensity[mu >= 2*pi] = NaN
+ logdensity[rho <= -0.5] = NaN
+ logdensity[rho >= 0.5] = NaN
+ if (log.arg) logdensity else exp(logdensity)
}
-pcard = function(q, mu, rho) {
- if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
- stop("'mu' must be between 0 and 2*pi inclusive")
- if (!is.Numeric(rho) || max(abs(rho) > 0.5))
- stop("'rho' must be between -0.5 and 0.5 inclusive")
- ans = (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
- ans[q >= (2*pi)] = 1
- ans[q <= 0] = 0
- ans
+
+pcard <- function(q, mu, rho) {
+ if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
+ stop("'mu' must be between 0 and 2*pi inclusive")
+ if (!is.Numeric(rho) || max(abs(rho) > 0.5))
+ stop("'rho' must be between -0.5 and 0.5 inclusive")
+ ans = (q + 2 * rho * (sin(q-mu) + sin(mu))) / (2*pi)
+ ans[q >= (2*pi)] = 1
+ ans[q <= 0] = 0
+ ans
}
-qcard = function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
- if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
- stop("'mu' must be between 0 and 2*pi inclusive")
- if (!is.Numeric(rho) || max(abs(rho) > 0.5))
- stop("'rho' must be between -0.5 and 0.5 inclusive")
- if (!is.Numeric(p, positive = TRUE) || any(p > 1))
- stop("'p' must be between 0 and 1")
- nn = max(length(p), length(mu), length(rho))
- p = rep(p, len=nn)
- mu = rep(mu, len=nn)
- rho = rep(rho, len=nn)
+qcard <- function(p, mu, rho, tolerance=1.0e-7, maxits=500) {
+ if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
+ stop("'mu' must be between 0 and 2*pi inclusive")
+ if (!is.Numeric(rho) || max(abs(rho) > 0.5))
+ stop("'rho' must be between -0.5 and 0.5 inclusive")
+ if (!is.Numeric(p, positive = TRUE) || any(p > 1))
+ stop("'p' must be between 0 and 1")
+ nn = max(length(p), length(mu), length(rho))
+ p = rep(p, len=nn)
+ mu = rep(mu, len=nn)
+ rho = rep(rho, len=nn)
- oldans = 2 * pi * p
+ oldans = 2 * pi * p
- for(its in 1:maxits) {
- ans = oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
- 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
- index = (ans <= 0) | (ans > 2*pi)
- if (any(index)) {
- ans[index] = runif (sum(index), 0, 2*pi)
+ for(its in 1:maxits) {
+ ans = oldans - (oldans + 2 * rho * (sin(oldans-mu)+sin(mu)) -
+ 2*pi*p) / (1 + 2 * rho * cos(oldans - mu))
+ index = (ans <= 0) | (ans > 2*pi)
+ if (any(index)) {
+ ans[index] = runif (sum(index), 0, 2*pi)
}
- if (max(abs(ans - oldans)) < tolerance) break;
- if (its == maxits) {warning("did not converge"); break}
- oldans = ans
- }
- ans
+ if (max(abs(ans - oldans)) < tolerance) break;
+ if (its == maxits) {warning("did not converge"); break}
+ oldans = ans
+ }
+ ans
}
-rcard = function(n, mu, rho, ...) {
- if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
- stop("'mu' must be between 0 and 2*pi inclusive")
- if (!is.Numeric(rho) || max(abs(rho) > 0.5))
- stop("'rho' must be between -0.5 and 0.5 inclusive")
- if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE, allowable.length = 1))
- stop("'n' must be a single positive integer")
- mu = rep(mu, len=n)
- rho = rep(rho, len=n)
- qcard(runif (n), mu=mu, rho=rho, ...)
+rcard <- function(n, mu, rho, ...) {
+ if (!is.Numeric(mu) || any(mu < 0) || any(mu > 2*pi))
+ stop("argument 'mu' must be between 0 and 2*pi inclusive")
+ if (!is.Numeric(rho) || max(abs(rho) > 0.5))
+ stop("argument 'rho' must be between -0.5 and 0.5 inclusive")
+ if (!is.Numeric(n, positive = TRUE,
+ integer.valued = TRUE, allowable.length = 1))
+ stop("argument 'n' must be a single positive integer")
+
+ mu = rep(mu, len = n)
+ rho = rep(rho, len = n)
+ qcard(runif (n), mu = mu, rho = rho, ...)
}
@@ -88,222 +92,280 @@ rcard = function(n, mu, rho, ...) {
cardioid.control <- function(save.weight = TRUE, ...)
{
- list(save.weight=save.weight)
+ list(save.weight = save.weight)
}
- cardioid = function(lmu = "elogit", lrho = "elogit",
- emu = if (lmu == "elogit") list(min=0, max=2*pi) else list(),
- erho = if (lmu == "elogit") list(min=-0.5, max=0.5) else list(),
- imu = NULL, irho=0.3,
- nsimEIM=100, zero = NULL)
+
+ cardioid <- function(
+ lmu = elogit(min = 0, max = 2*pi),
+ lrho = elogit(min = -0.5, max = 0.5),
+ imu = NULL, irho = 0.3,
+ nsimEIM = 100, zero = NULL)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lrho) != "character" && mode(lrho) != "name")
- lrho = as.character(substitute(lrho))
- if (length(imu) && (!is.Numeric(imu, positive = TRUE) || any(imu > 2*pi)))
- stop("bad input for argument 'imu'")
- if (!is.Numeric(irho) || max(abs(irho)) > 0.5)
- stop("bad input for argument 'irho'")
- if (!is.list(emu)) emu = list()
- if (!is.list(erho)) erho = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
-
- new("vglmff",
- blurb = c("Cardioid distribution\n\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("rho", lrho, earg = erho, tag = FALSE), "\n",
- "Mean: ",
- "pi + (rho/pi) *",
- "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- if (any((y <= 0) | (y >=2*pi)))
- stop("the response must be in (0,2*pi)")
- predictors.names = c(
- namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("rho", .lrho, earg = .erho, tag = FALSE))
- if (!length(etastart)) {
- rho.init = rep(if (length(.irho)) .irho else 0.3, length=n)
-
- cardioid.Loglikfun = function(mu, y, x, w, extraargs) {
- rho = extraargs$irho
- sum(w * (-log(2*pi) + log1p(2*rho*cos(y-mu))))
- }
- mu.grid = seq(0.1, 6.0, len=19)
- mu.init = if (length( .imu )) .imu else
- getMaxMin(mu.grid, objfun=cardioid.Loglikfun, y=y, x=x, w=w,
- extraargs = list(irho = rho.init))
- mu.init = rep(mu.init, length=length(y))
- etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
- theta2eta(rho.init, .lrho, earg = .erho))
- }
- }), list( .lmu = lmu, .lrho = lrho,
- .imu = imu, .irho = irho,
- .emu = emu, .erho = erho ))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- mu = eta2theta(eta[,1], link= .lmu, earg = .emu)
- rho = eta2theta(eta[,2], link= .lrho, earg = .erho)
- pi + (rho/pi) *
- ((2*pi-mu)*sin(2*pi-mu) + cos(2*pi-mu) - mu*sin(mu) - cos(mu))
- }, list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho ))),
- last = eval(substitute(expression({
- misc$link = c("mu"= .lmu, "rho"= .lrho)
- misc$earg = list("mu"= .emu, "rho"= .erho)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho, .nsimEIM = nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mu = eta2theta(eta[,1], link= .lmu, earg = .emu)
- rho = eta2theta(eta[,2], link= .lrho, earg = .erho)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dcard(x=y, mu=mu, rho=rho, log = TRUE))
- }
- }, list( .lmu = lmu, .lrho=lrho,
- .emu = emu, .erho=erho ))),
- vfamily=c("cardioid"),
- deriv=eval(substitute(expression({
- mu = eta2theta(eta[,1], link= .lmu, earg = .emu)
- rho = eta2theta(eta[,2], link= .lrho, earg = .erho)
- dmu.deta = dtheta.deta(mu, link= .lmu, earg = .emu)
- drho.deta = dtheta.deta(rho, link= .lrho, earg = .erho)
- dl.dmu = 2 * rho * sin(y-mu) / (1 + 2 * rho * cos(y-mu))
- dl.drho = 2 * cos(y-mu) / (1 + 2 * rho * cos(y-mu))
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.drho * drho.deta)
- }), list( .lmu = lmu, .lrho=lrho,
- .emu = emu, .erho=erho, .nsimEIM=nsimEIM ))),
- weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- for(ii in 1:( .nsimEIM )) {
- ysim = rcard(n, mu=mu, rho=rho)
- dl.dmu = 2 * rho * sin(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
- dl.drho = 2 * cos(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
- rm(ysim)
- temp3 = cbind(dl.dmu, dl.drho)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
- dtheta.detas = cbind(dmu.deta, drho.deta)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
- c(w) * wz
- }), list( .lmu = lmu, .lrho = lrho,
- .emu = emu, .erho = erho, .nsimEIM = nsimEIM ))))
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
+
+
+
+ if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
+ any(imu > 2*pi)))
+ stop("bad input for argument 'imu'")
+ if (!is.Numeric(irho) || max(abs(irho)) > 0.5)
+ stop("bad input for argument 'irho'")
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("'nsimEIM' should be an integer greater than 50")
+
+
+ new("vglmff",
+ blurb = c("Cardioid distribution\n\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu, tag = FALSE), ", ",
+ namesof("rho", lrho, earg = erho, tag = FALSE), "\n",
+ "Mean: ",
+ "pi + (rho/pi) *",
+ "((2*pi-mu)*sin(2*pi-mu)+cos(2*pi-mu)-mu*sin(mu)-cos(mu))"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ if (any((y <= 0) | (y >=2*pi)))
+ stop("the response must be in (0, 2*pi)")
+
+ predictors.names <- c(
+ namesof("mu", .lmu, earg = .emu , tag = FALSE),
+ namesof("rho", .lrho, earg = .erho, tag = FALSE))
+
+ if (!length(etastart)) {
+ rho.init = rep(if (length(.irho)) .irho else 0.3, length=n)
+
+ cardioid.Loglikfun <- function(mu, y, x, w, extraargs) {
+ rho = extraargs$irho
+ sum(w * (-log(2*pi) + log1p(2*rho*cos(y-mu))))
+ }
+ mu.grid = seq(0.1, 6.0, len=19)
+ mu.init = if (length( .imu )) .imu else
+ getMaxMin(mu.grid, objfun=cardioid.Loglikfun, y=y, x=x, w=w,
+ extraargs = list(irho = rho.init))
+ mu.init = rep(mu.init, length=length(y))
+ etastart = cbind(theta2eta( mu.init, .lmu, earg = .emu),
+ theta2eta(rho.init, .lrho, earg = .erho))
+ }
+ }), list( .lmu = lmu, .lrho = lrho,
+ .imu = imu, .irho = irho,
+ .emu = emu, .erho = erho ))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ pi + (rho/pi) *
+ ((2*pi-mu)*sin(2*pi-mu) + cos(2*pi-mu) - mu*sin(mu) - cos(mu))
+ }, list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho ))),
+ last = eval(substitute(expression({
+ misc$link = c("mu" = .lmu, "rho" = .lrho)
+
+ misc$earg = list("mu" = .emu, "rho" = .erho)
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(w * dcard(x = y, mu = mu, rho = rho, log = TRUE))
+ }
+ }, list( .lmu = lmu, .lrho=lrho,
+ .emu = emu, .erho=erho ))),
+ vfamily = c("cardioid"),
+ deriv = eval(substitute(expression({
+ mu = eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ rho = eta2theta(eta[, 2], link = .lrho, earg = .erho)
+
+ dmu.deta = dtheta.deta(mu, link = .lmu, earg = .emu)
+ drho.deta = dtheta.deta(rho, link = .lrho, earg = .erho)
+
+ dl.dmu = 2 * rho * sin(y-mu) / (1 + 2 * rho * cos(y-mu))
+ dl.drho = 2 * cos(y-mu) / (1 + 2 * rho * cos(y-mu))
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.drho * drho.deta)
+ }), list( .lmu = lmu, .lrho=lrho,
+ .emu = emu, .erho=erho, .nsimEIM=nsimEIM ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rcard(n, mu=mu, rho=rho)
+ dl.dmu = 2 * rho * sin(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
+ dl.drho = 2 * cos(ysim-mu) / (1 + 2 * rho * cos(ysim-mu))
+ rm(ysim)
+ temp3 = cbind(dl.dmu, dl.drho)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ dtheta.detas = cbind(dmu.deta, drho.deta)
+ wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ c(w) * wz
+ }), list( .lmu = lmu, .lrho = lrho,
+ .emu = emu, .erho = erho, .nsimEIM = nsimEIM ))))
}
- vonmises = function(llocation = "elogit",
- lscale = "loge",
- elocation = if (llocation == "elogit") list(min = 0, max = 2*pi)
- else list(),
- escale = list(),
- ilocation = NULL, iscale = NULL,
- imethod=1, zero = NULL) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
- imethod > 2) stop("argument 'imethod' must be 1 or 2")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(escale)) escale = list()
-
- new("vglmff",
- blurb = c("Von Mises distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg =escale),
- "\n", "\n",
- "Mean: location"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg =.escale, tag = FALSE))
- if (!length(etastart)) {
- if ( .imethod == 1) {
- location.init = mean(y)
- rat10 = sqrt((sum(w*cos(y )))^2 + sum(w*sin(y))^2) / sum(w)
- scale.init = sqrt(1 - rat10)
- } else {
- location.init = median(y)
- scale.init = sqrt(sum(w*abs(y - location.init)) / sum(w))
- }
- location.init = if (length(.ilocation)) rep(.ilocation, len=n) else
- rep(location.init, len=n)
- scale.init= if (length(.iscale)) rep(.iscale,len=n) else rep(1,len=n)
- etastart = cbind(
- theta2eta(location.init, .llocation, earg = .elocation),
- theta2eta(scale.init, .lscale, earg = .escale))
+ vonmises <- function(llocation = elogit(min = 0, max = 2*pi),
+ lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ imethod = 1, zero = NULL) {
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ ilocat <- ilocation
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ new("vglmff",
+ blurb = c("Von Mises distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero ,
+ parameterNames = c("location", "scale"))
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE))
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init = mean(y)
+ rat10 = sqrt((sum(w*cos(y )))^2 + sum(w*sin(y))^2) / sum(w)
+ scale.init = sqrt(1 - rat10)
+ } else {
+ locat.init = median(y)
+ scale.init = sqrt(sum(w*abs(y - locat.init)) / sum(w))
}
- y = y %% (2*pi) # Coerce after initial values have been computed
- }), list( .imethod = imethod, .ilocation = ilocation,
- .escale = escale, .iscale = iscale,
- .lscale = lscale, .llocation = llocation, .elocation = elocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocation, earg = .elocation) %% (2*pi)
- }, list( .escale = escale, .lscale = lscale,
- .llocation = llocation, .elocation = elocation ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocation, scale = .lscale)
- misc$earg = list(location = .elocation, scale = .escale )
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (Scale * cos(y - location) -
- log(mbesselI0(x = Scale ))))
- }, list( .escale = escale, .lscale = lscale,
- .llocation = llocation, .elocation = elocation ))),
- vfamily = c("vonmises"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta[,1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[,2], .lscale, earg = .escale)
- tmp6 = mbesselI0(x=Scale, deriv=2)
- dl.dlocation = Scale * sin(y - location)
- dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
- dl.dscale = cos(y - location) - tmp6[,2] / tmp6[,1]
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .escale = escale, .lscale = lscale,
- .llocation = llocation, .elocation = elocation ))),
- weight = eval(substitute(expression({
- d2l.location2 = Scale * tmp6[,2] / tmp6[,1]
- d2l.dscale2 = tmp6[,3] / tmp6[,1] - (tmp6[,2] / tmp6[,1])^2
- wz = matrix(as.numeric(NA), nrow=n, ncol=2) # diagonal
- wz[,iam(1,1,M)] = d2l.location2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- c(w) * wz
- }), list( .escale = escale, .lscale = lscale,
- .llocation = llocation, .elocation = elocation ))))
+
+ locat.init = if (length( .ilocat ))
+ rep( .ilocat , len=n) else
+ rep(locat.init, len=n)
+ scale.init = if (length( .iscale ))
+ rep( .iscale , len = n) else rep(1, len = n)
+ etastart = cbind(
+ theta2eta(locat.init, .llocat, earg = .elocat),
+ theta2eta(scale.init, .lscale, earg = .escale))
+ }
+ y = y %% (2*pi) # Coerce after initial values have been computed
+ }), list( .imethod = imethod, .ilocat = ilocat,
+ .escale = escale, .elocat = elocat,
+ .lscale = lscale, .llocat = llocat,
+ .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat, earg = .elocat) %% (2*pi)
+ }, list( .escale = escale, .lscale = lscale,
+ .llocat = llocat, .elocat = elocat ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat, scale = .lscale)
+ misc$earg = list(location = .elocat, scale = .escale )
+
+
+
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
+ locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * (Scale * cos(y - locat) -
+ log(mbesselI0(x = Scale ))))
+ }, list( .escale = escale, .lscale = lscale,
+ .llocat = llocat, .elocat = elocat ))),
+ vfamily = c("vonmises"),
+ deriv = eval(substitute(expression({
+ locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
+ Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+
+ tmp6 = mbesselI0(x=Scale, deriv=2)
+ dl.dlocat = Scale * sin(y - locat)
+ dl.dscale = cos(y - locat) - tmp6[, 2] / tmp6[, 1]
+
+ dlocat.deta = dtheta.deta(locat, .llocat ,
+ earg = .elocat )
+ dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .llocat = llocat, .elocat = elocat ))),
+ weight = eval(substitute(expression({
+ d2l.dlocat2 = Scale * tmp6[, 2] / tmp6[, 1]
+ d2l.dscale2 = tmp6[, 3] / tmp6[, 1] - (tmp6[, 2] / tmp6[, 1])^2
+ wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz[,iam(1, 1, M)] = d2l.dlocat2 * dlocat.deta^2
+ wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ c(w) * wz
+ }), list( .escale = escale, .elocat = elocat,
+ .lscale = lscale, .llocat = llocat ))))
}
diff --git a/R/family.exp.R b/R/family.exp.R
index 2ad0e35..624f572 100644
--- a/R/family.exp.R
+++ b/R/family.exp.R
@@ -52,7 +52,7 @@ qeunif <- function(p, min = 0, max = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
peunif <- function(q, min = 0, max = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
if (any(min >= max))
@@ -75,7 +75,7 @@ peunif <- function(q, min = 0, max = 1, log = FALSE) {
deunif <- function(x, min = 0, max = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
if (any(min >= max))
@@ -145,7 +145,7 @@ qenorm <- function(p, mean = 0, sd = 1, Maxit_nr = 10,
penorm <- function(q, mean = 0, sd = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -166,7 +166,7 @@ penorm <- function(q, mean = 0, sd = 1, log = FALSE) {
denorm <- function(x, mean = 0, sd = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -238,7 +238,7 @@ qeexp <- function(p, rate = 1, Maxit_nr = 10, Tol_nr = 1.0e-6) {
peexp <- function(q, rate = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -261,7 +261,7 @@ peexp <- function(q, rate = 1, log = FALSE) {
deexp <- function(x, rate = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
if (any(rate <= 0))
@@ -294,7 +294,7 @@ reexp <- function(n, rate = 1) {
dkoenker <- function(x, location = 0, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -311,7 +311,7 @@ dkoenker <- function(x, location = 0, scale = 1, log = FALSE) {
pkoenker <- function(q, location = 0, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -356,7 +356,6 @@ rkoenker <- function(n, location = 0, scale = 1) {
koenker <- function(percentile = 50,
llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
ilocation = NULL, iscale = NULL,
imethod = 1,
zero = 2)
@@ -364,23 +363,23 @@ rkoenker <- function(n, location = 0, scale = 1) {
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ ilocat <- ilocation
- llocat = llocation
- elocat = elocation
- ilocat = ilocation
- if (mode(llocat) != "character" && mode(llocat) != "name")
- llocat <- as.character(substitute(llocat))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
if (length(ilocat) &&
(!is.Numeric(ilocat, allowable.length = 1, positive = TRUE)))
stop("bad input for argument 'ilocation'")
if (length(iscale) && !is.Numeric(iscale))
stop("bad input for argument 'iscale'")
- if (!is.list(elocat)) elocat = list()
- if (!is.list(escale)) escale = list()
if (!is.Numeric(percentile, positive = TRUE) ||
any(percentile >= 100))
@@ -390,6 +389,7 @@ rkoenker <- function(n, location = 0, scale = 1) {
imethod > 2)
stop("'imethod' must be 1 or 2")
+
new("vglmff",
blurb = c("Koenker distribution\n\n",
"Links: ",
@@ -401,14 +401,23 @@ rkoenker <- function(n, location = 0, scale = 1) {
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
predictors.names <- c(
namesof("location", .llocat, earg = .elocat, tag = FALSE),
namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (!length(etastart)) {
+
+ if (!length(etastart)) {
locat.init <- if ( .imethod == 2) {
weighted.mean(y, w)
} else {
@@ -440,10 +449,13 @@ rkoenker <- function(n, location = 0, scale = 1) {
.percentile = percentile ))),
last = eval(substitute(expression({
misc$link <- c("location" = .llocat, "scale" = .lscale)
+
misc$earg <- list("location" = .elocat, "scale" = .escale)
+
misc$expected <- TRUE
misc$percentile <- .percentile
misc$imethod <- .imethod
+ misc$multipleResponses <- FALSE
ncoly <- ncol(y)
for(ii in 1:length( .percentile )) {
@@ -506,13 +518,3 @@ rkoenker <- function(n, location = 0, scale = 1) {
-
-
-
-
-
-
-
-
-
-
diff --git a/R/family.extremes.R b/R/family.extremes.R
index edf7203..1ee4b0e 100644
--- a/R/family.extremes.R
+++ b/R/family.extremes.R
@@ -12,30 +12,39 @@
+
+
+
+
+
+
rgev <- function(n, location = 0, scale = 1, shape = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
if (!is.Numeric(location))
stop("bad input for argument argument 'location'")
if (!is.Numeric(shape))
stop("bad input for argument argument 'shape'")
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n);
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n)
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
+ scase <- abs(shape) < sqrt(.Machine$double.eps)
+ nscase <- sum(scase)
if (use.n - nscase)
- ans[!scase] = location[!scase] + scale[!scase] *
+ ans[!scase] <- location[!scase] + scale[!scase] *
((-log(runif(use.n - nscase)))^(-shape[!scase]) -1) / shape[!scase]
if (nscase)
- ans[scase] = rgumbel(nscase, location[scase], scale[scase])
- ans[scale <= 0] = NaN
+ ans[scase] <- rgumbel(nscase, location[scase], scale[scase])
+ ans[scale <= 0] <- NaN
ans
}
@@ -44,7 +53,7 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
dgev <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
tolshape0 = sqrt(.Machine$double.eps),
oobounds.log = -Inf, giveWarning = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
if (oobounds.log > 0)
@@ -54,34 +63,40 @@ rgev <- function(n, location = 0, scale = 1, shape = 0) {
stop("bad input for argument 'tolshape0'")
use.n = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, length.out = use.n)
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n);
- x = rep(x, length.out = use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
- logdensity = rep(log(0), length.out = use.n)
- scase = abs(shape) < tolshape0
- nscase = sum(scase)
+
+
+ x <- rep(x, length.out = use.n)
+
+ logdensity <- rep(log(0), length.out = use.n)
+ scase <- abs(shape) < tolshape0
+ nscase <- sum(scase)
if (use.n - nscase) {
- zedd = 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
- xok = (!scase) & (zedd > 0)
- logdensity[xok] = -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
+ zedd <- 1+shape*(x-location)/scale # pmax(0, (1+shape*xc/scale))
+ xok <- (!scase) & (zedd > 0)
+ logdensity[xok] <- -log(scale[xok]) - zedd[xok]^(-1/shape[xok]) -
(1 + 1/shape[xok]) * log(zedd[xok])
- outofbounds = (!scase) & (zedd <= 0)
+ outofbounds <- (!scase) & (zedd <= 0)
if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
+ logdensity[outofbounds] <- oobounds.log
+ no.oob <- sum(outofbounds)
if (giveWarning)
warning(no.oob, " observation",
ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
}
if (nscase) {
- logdensity[scase] = dgumbel(x[scase], location = location[scase],
- scale = scale[scase], log = TRUE)
+ logdensity[scase] <- dgumbel(x[scase], location = location[scase],
+ scale = scale[scase], log = TRUE)
}
- logdensity[scale <= 0] = NaN
+ logdensity[scale <= 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
@@ -95,23 +110,27 @@ pgev <- function(q, location = 0, scale = 1, shape = 0) {
if (!is.Numeric(shape))
stop("bad input for argument 'shape'")
- use.n = max(length(q), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n)
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n)
- q = rep(q - location, length.out = use.n)
-
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
+ use.n <- max(length(q), length(location), length(scale), length(shape))
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
+ if (length(q) != use.n)
+ q <- rep(q - location, length.out = use.n)
+
+ scase <- abs(shape) < sqrt(.Machine$double.eps)
+ nscase <- sum(scase)
if (use.n - nscase) {
- zedd = pmax(0, (1 + shape * q / scale))
- ans[!scase] = exp(-zedd[!scase]^(-1 / shape[!scase]))
+ zedd <- pmax(0, (1 + shape * q / scale))
+ ans[!scase] <- exp(-zedd[!scase]^(-1 / shape[!scase]))
}
if (nscase) {
- ans[scase] = pgumbel(q[scase], location[scase], scale[scase])
+ ans[scase] <- pgumbel(q[scase], location[scase], scale[scase])
}
- ans[scale <= 0] = NaN
+ ans[scale <= 0] <- NaN
ans
}
@@ -125,22 +144,27 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
if (!is.Numeric(shape))
stop("bad input for argument 'shape'")
- use.n = max(length(p), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n)
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n);
- p = rep(p, length.out = use.n)
+ use.n <- max(length(p), length(location), length(scale), length(shape))
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
+ if (length(p) != use.n)
+ p <- rep(p, length.out = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
+
+ scase <- abs(shape) < sqrt(.Machine$double.eps)
+ nscase <- sum(scase)
if (use.n - nscase) {
- ans[!scase] = location[!scase] + scale[!scase] *
+ ans[!scase] <- location[!scase] + scale[!scase] *
((-log(p[!scase]))^(-shape[!scase]) - 1) / shape[!scase]
}
if (nscase)
- ans[scase] = qgumbel(p[scase], location[scase], scale[scase])
- ans[scale <= 0] = NaN
+ ans[scase] <- qgumbel(p[scase], location[scase], scale[scase])
+ ans[scale <= 0] <- NaN
ans
}
@@ -148,13 +172,10 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
- gev <- function(llocation = "identity",
+ gev <- function(
+ llocation = "identity",
lscale = "loge",
- lshape = "logoff",
- elocation = list(),
- escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+ lshape = logoff(offset = 0.5),
percentiles = c(95, 99),
iscale = NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45),
@@ -163,210 +184,282 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
{
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
- mean = FALSE
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
- if (!mean && length(percentiles) &&
- (!is.Numeric(percentiles, positive = TRUE) ||
- max(percentiles) >= 100))
- stop("bad input for argument 'percentiles'")
- if (!is.Numeric(imethod, allowable.length = 1,
- positive = TRUE, integer.valued = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
- if (length(ishape) && !is.Numeric(ishape))
- stop("bad input for argument 'ishape'")
- if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
- tolshape0 > 0.1)
- stop("bad input for argument 'tolshape0'")
- if (!is.Numeric(gshape, allowable.length = 2) ||
- gshape[1] >= gshape[2])
- stop("bad input for argument 'gshape'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
- new("vglmff",
- blurb = c("Generalized extreme value distribution\n",
- "Links: ",
- namesof("location", link = llocation, earg = elocation), ", ",
- namesof("scale", link = lscale, earg = escale), ", ",
- namesof("shape", link = lshape, earg = eshape)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, short = TRUE),
- namesof("scale", .lscale, earg = .escale, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE))
- y = as.matrix(y)
- if (ncol(y) > 1)
- y = -t(apply(-y, 1, sort, na.last = TRUE))
-
- r.vec = rowSums(cbind(!is.na(y)))
-
-
- if (any(r.vec == 0))
- stop("A row contains all missing values")
-
- extra$percentiles = .percentiles
- if (!length(etastart)) {
- init.sig = if (length( .iscale))
- rep( .iscale, length.out = nrow(y)) else NULL
- init.xi = if (length( .ishape))
- rep( .ishape, length.out = nrow(y)) else NULL
- eshape = .eshape
- if ( .lshape == "elogit" && length(init.xi) &&
- (any(init.xi <= eshape$min |
- init.xi >= eshape$max)))
- stop("bad input for argument 'eshape'")
- if ( .imethod == 1) {
- nvector = 4:10 # Arbitrary; could be made an argument
- ynvector = quantile(y[, 1], probs = 1-1/nvector)
- objecFunction = -Inf # Actually the log-likelihood
- est.sigma = !length(init.sig)
- gshape = .gshape
- temp234 = if (length(init.xi)) init.xi[1] else
- seq(gshape[1], gshape[2], length.out = 12)
- for(xi.try in temp234) {
- xvec = if (abs(xi.try) < .tolshape0) log(nvector) else
- (nvector^xi.try - 1) / xi.try
- fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
- sigmaTry = if (est.sigma)
- rep(fit0$coef["X"], length.out = nrow(y)) else
- init.sig
- muTry = rep(fit0$coef["Intercept"], length.out = nrow(y))
- llTry = egev(giveWarning=
- FALSE)@loglikelihood(mu = NULL, y=y[, 1], w=w,
- residuals = FALSE,
- eta =
- cbind(theta2eta(muTry, .llocation,earg = .elocation),
- theta2eta(sigmaTry, .lscale,earg = .escale),
- theta2eta(xi.try, link= .lshape, earg = .eshape)))
- if (llTry >= objecFunction) {
- if (est.sigma)
- init.sig = sigmaTry
- init.mu = rep(muTry, length.out = nrow(y))
- objecFunction = llTry
- bestxi = xi.try
- }
+
+
+ if (!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
+
+ mean <- FALSE
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+
+
+ if (!mean && length(percentiles) &&
+ (!is.Numeric(percentiles, positive = TRUE) ||
+ max(percentiles) >= 100))
+ stop("bad input for argument 'percentiles'")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(ishape) && !is.Numeric(ishape))
+ stop("bad input for argument 'ishape'")
+
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
+ tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
+ if (!is.Numeric(gshape, allowable.length = 2) ||
+ gshape[1] >= gshape[2])
+ stop("bad input for argument 'gshape'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Generalized extreme value distribution\n",
+ "Links: ",
+ namesof("location", llocat, elocat), ", ",
+ namesof("scale", lscale, escale), ", ",
+ namesof("shape", lshape, eshape)),
+ constraints=eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 3,
+ multipleResponses = FALSE,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+ Musual <- extra$Musual <- 3
+ ncoly <- ncol(y)
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+
+
+ mynames1 <- "location"
+ mynames2 <- "scale"
+ mynames3 <- "shape"
+ llocat <- .llocat
+ lscale <- .lscale
+ lshape <- .lshape
+
+
+ predictors.names <- c(
+ namesof(mynames1, .llocat , .elocat , short = TRUE),
+ namesof(mynames2, .lscale , .escale , short = TRUE),
+ namesof(mynames3, .lshape , .eshape , short = TRUE))
+
+
+
+
+ y = as.matrix(y)
+
+
+
+
+
+ if (ncol(y) > 1)
+ y = -t(apply(-y, 1, sort, na.last = TRUE))
+
+
+
+
+
+
+
+ r.vec = rowSums(cbind(!is.na(y)))
+
+
+ if (any(r.vec == 0))
+ stop("A row contains all missing values")
+
+ extra$percentiles = .percentiles
+ if (!length(etastart)) {
+ init.sig = if (length( .iscale ))
+ rep( .iscale, length.out = nrow(y)) else NULL
+ init.xi = if (length( .ishape ))
+ rep( .ishape, length.out = nrow(y)) else NULL
+ LIST.lshape = .lshape
+
+ if ( .lshape == "elogit" && length(init.xi) &&
+ (any(init.xi <= LIST.lshape$min |
+ init.xi >= LIST.lshape$max)))
+ stop("bad input for an argument in 'lshape'")
+
+ if ( .imethod == 1) {
+ nvector = 4:10 # Arbitrary; could be made an argument
+ ynvector = quantile(y[, 1], probs = 1-1/nvector)
+ objecFunction = -Inf # Actually the log-likelihood
+ est.sigma = !length(init.sig)
+ gshape = .gshape
+ temp234 = if (length(init.xi)) init.xi[1] else
+ seq(gshape[1], gshape[2], length.out = 12)
+ for(shapeTry in temp234) {
+ xvec = if (abs(shapeTry) < .tolshape0) log(nvector) else
+ (nvector^shapeTry - 1) / shapeTry
+ fit0 = lsfit(x = xvec, y=ynvector, intercept = TRUE)
+ sigmaTry = if (est.sigma)
+ rep(fit0$coef["X"], length.out = nrow(y)) else
+ init.sig
+ LocatTry = rep(fit0$coef["Intercept"], length.out = nrow(y))
+ llTry = egev(giveWarning =
+ FALSE)@loglikelihood(mu = NULL, y = y[, 1], w = w,
+ residuals = FALSE,
+ eta =
+ cbind(theta2eta(LocatTry, .llocat , .elocat ),
+ theta2eta(sigmaTry, .lscale , .escale ),
+ theta2eta(shapeTry, .lshape , .eshape )))
+ if (llTry >= objecFunction) {
+ if (est.sigma)
+ init.sig = sigmaTry
+ init.mu = rep(LocatTry, length.out = nrow(y))
+ objecFunction = llTry
+ bestxi = shapeTry
}
- if (!length(init.xi))
- init.xi = rep(bestxi, length.out = nrow(y))
- } else {
- init.xi = rep(0.05, length.out = nrow(y))
- if (!length(init.sig))
- init.sig = rep(sqrt(6 * var(y[, 1]))/pi,
- length.out = nrow(y))
- EulerM = -digamma(1)
- init.mu = rep(median(y[, 1]) - EulerM*init.sig,
- length.out = nrow(y))
}
+ if (!length(init.xi))
+ init.xi = rep(bestxi, length.out = nrow(y))
+ } else {
+ init.xi = rep(0.05, length.out = nrow(y))
+ if (!length(init.sig))
+ init.sig = rep(sqrt(6 * var(y[, 1]))/pi,
+ length.out = nrow(y))
+ EulerM <- -digamma(1)
+ init.mu = rep(median(y[, 1]) - EulerM*init.sig,
+ length.out = nrow(y))
+ }
- bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
- if (fred <- sum(bad)) {
- warning(paste(fred, "observations violating boundary",
- "constraints while initializing. Taking corrective action."))
- init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
- }
+ bad = ((1 + init.xi*(y-init.mu)/init.sig) <= 0)
+ if (fred <- sum(bad)) {
+ warning(paste(fred, "observations violating boundary",
+ "constraints while initializing. Taking corrective action."))
+ init.xi[bad] = ifelse(y[bad] > init.mu[bad], 0.1, -0.1)
+ }
+
+ etastart <-
+ cbind(theta2eta(init.mu, .llocat , .elocat ),
+ theta2eta(init.sig, .lscale , .escale ),
+ theta2eta(init.xi, .lshape , .eshape ))
+ }
+ }), list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .ishape = ishape, .iscale = iscale,
+
+ .gshape = gshape,
+ .percentiles = percentiles,
+ .tolshape0 = tolshape0,
+ .imethod = imethod, .giveWarning = giveWarning ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Locat <- eta2theta(eta[, 1], .llocat , .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , .escale )
+ shape <- eta2theta(eta[, 3], .lshape , .eshape )
+
+ is.zero = (abs(shape) < .tolshape0 )
+ cent = extra$percentiles
+ LP = length(cent)
+ fv = matrix(as.numeric(NA), nrow(eta), LP)
+ if (LP) {
+ for(ii in 1:LP) {
+ yp = -log(cent[ii]/100)
+ fv[!is.zero,ii] = Locat[!is.zero] - sigma[!is.zero] *
+ (1 - yp^(-shape[!is.zero])) / shape[!is.zero]
+ fv[is.zero,ii] = Locat[is.zero] - sigma[is.zero] * log(yp)
+ }
+ dimnames(fv) = list(dimnames(eta)[[1]],
+ paste(as.character(cent), "%", sep = ""))
+ } else {
+ EulerM <- -digamma(1)
+ fv = Locat + sigma * EulerM # When shape = 0, is Gumbel
+ fv[!is.zero] = Locat[!is.zero] + sigma[!is.zero] *
+ (gamma(1-shape[!is.zero])-1) / shape[!is.zero]
+ fv[shape >= 1] = NA # Mean exists only if shape < 1.
+ }
+ fv
+ }, list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+
+ .tolshape0 = tolshape0 ))),
+ last = eval(substitute(expression({
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- c(mynames1, mynames2, mynames3)
+ misc$earg[[1]] <- .elocat
+ misc$earg[[2]] <- .escale
+ misc$earg[[3]] <- .eshape
+
+ misc$link <- c( .llocat , .lscale , .lshape )
+ names(misc$link) <- c(mynames1, mynames2, mynames3)
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
- etastart =
- cbind(theta2eta(init.mu, .llocation, earg = .elocation),
- theta2eta(init.sig, .lscale, earg = .escale),
- theta2eta(init.xi, .lshape, earg = .eshape))
- }
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .iscale = iscale, .ishape = ishape,
- .gshape = gshape,
- .percentiles = percentiles,
- .tolshape0 = tolshape0,
- .imethod = imethod, .giveWarning= giveWarning ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale)
- xi = eta2theta(eta[,3], .lshape, earg = .eshape)
- is.zero = (abs(xi) < .tolshape0)
- cent = extra$percentiles
- LP = length(cent)
- fv = matrix(as.numeric(NA), nrow(eta), LP)
- if (LP) {
- for(ii in 1:LP) {
- yp = -log(cent[ii]/100)
- fv[!is.zero,ii] = loc[!is.zero] - sigma[!is.zero] *
- (1 - yp^(-xi[!is.zero])) / xi[!is.zero]
- fv[is.zero,ii] = loc[is.zero] - sigma[is.zero] * log(yp)
- }
- dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(cent), "%", sep = ""))
- } else {
- EulerM = -digamma(1)
- fv = loc + sigma * EulerM # When xi = 0, is Gumbel
- fv[!is.zero] = loc[!is.zero] + sigma[!is.zero] *
- (gamma(1-xi[!is.zero])-1) / xi[!is.zero]
- fv[xi >= 1] = NA # Mean exists only if xi < 1.
- }
- fv
- }, list( .llocation = llocation, .lscale = lscale,
- .lshape = lshape,
- .eshape = eshape, .tolshape0 = tolshape0 ))),
- last = eval(substitute(expression({
- misc$links = c(location = .llocation,
- scale = .lscale,
- shape = .lshape)
- misc$true.mu = !length( .percentiles) # @fitted is not a true mu
- misc$percentiles = .percentiles
- misc$earg = list(location = .elocation,
- scale = .escale,
- shape = .eshape)
- misc$expected = TRUE
- misc$tolshape0 = .tolshape0
- if (ncol(y) == 1)
- y = as.vector(y)
- if (any(xi < -0.5))
- warning("some values of the shape parameter are less than -0.5")
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0, .percentiles = percentiles ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- mmu = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale)
- xi = eta2theta(eta[,3], .lshape, earg = .eshape)
- is.zero = (abs(xi) < .tolshape0)
- zedd = (y-mmu) / sigma
- r.vec = rowSums(cbind(!is.na(y)))
- A = 1 + xi * (y-mmu)/sigma
- ii = 1:nrow(eta)
- A1 = A[cbind(ii, r.vec)]
- mytolerance = 0 # .Machine$double.eps
- if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
- cat("There are", sum(bad),
- "range violations in @loglikelihood\n")
- flush.console()
- }
- igev = !is.zero & !bad
- igum = is.zero & !bad
- pow = 1 + 1/xi[igev]
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
+
+
+
+
+
+
+ misc$true.mu = !length( .percentiles) # @fitted is not a true mu
+ misc$percentiles = .percentiles
+ misc$expected = TRUE
+ misc$tolshape0 = .tolshape0
+ if (ncol(y) == 1)
+ y = as.vector(y)
+ if (any(shape < -0.5))
+ warning("some values of the shape parameter are less than -0.5")
+ }), list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+ Locat <- eta2theta(eta[, 1], .llocat , .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , .escale )
+ shape <- eta2theta(eta[, 3], .lshape , .eshape )
+
+
+ is.zero = (abs(shape) < .tolshape0)
+ zedd = (y-Locat) / sigma
+ r.vec = rowSums(cbind(!is.na(y)))
+ A = 1 + shape * (y-Locat)/sigma
+ ii = 1:nrow(eta)
+ A1 = A[cbind(ii, r.vec)]
+ mytolerance = 0 # .Machine$double.eps
+ if (any(bad <- (A1 <= mytolerance), na.rm = TRUE)) {
+ cat("There are", sum(bad),
+ "range violations in @loglikelihood\n")
+ flush.console()
+ }
+ igev = !is.zero & !bad
+ igum = is.zero & !bad
+ pow = 1 + 1/shape[igev]
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
old.answer =
sum(bad) * (-1.0e10) +
@@ -375,40 +468,54 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
rowSums(cbind(zedd, na.rm = TRUE)))) +
sum(w[igev] * (-r.vec[igev]*log(sigma[igev]) -
pow*rowSums(cbind(log(A[igev])), na.rm = TRUE) -
- A1[igev]^(-1/xi[igev])))
+ A1[igev]^(-1/shape[igev])))
old.answer
- }
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
- vfamily = c("gev", "vextremes"),
- deriv = eval(substitute(expression({
- r.vec = rowSums(cbind(!is.na(y)))
- mmu = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale)
- xi = eta2theta(eta[,3], .lshape, earg = .eshape)
- is.zero = (abs(xi) < .tolshape0)
- ii = 1:nrow(eta)
- zedd = (y-mmu) / sigma
- A = 1 + xi * zedd
+ }
+ }, list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+
+
+
+ .giveWarning = giveWarning, .tolshape0 = tolshape0 ))),
+ vfamily = c("gev", "vextremes"),
+ deriv = eval(substitute(expression({
+ Musual <- 3
+ r.vec = rowSums(cbind(!is.na(y)))
+
+ Locat <- eta2theta(eta[, 1], .llocat , .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , .escale )
+ shape <- eta2theta(eta[, 3], .lshape , .eshape )
+
+
+
+ dmu.deta <- dtheta.deta(Locat, .llocat , .elocat )
+ dsi.deta <- dtheta.deta(sigma, .lscale , .escale )
+ dxi.deta <- dtheta.deta(shape, .lshape , .eshape )
+
+
+ is.zero = (abs(shape) < .tolshape0)
+ ii = 1:nrow(eta)
+ zedd = (y-Locat) / sigma
+ A = 1 + shape * zedd
dA.dxi = zedd # matrix
- dA.dmu = -xi/sigma # vector
- dA.dsigma = -xi*zedd/sigma # matrix
- pow = 1 + 1/xi
+ dA.dmu = -shape/sigma # vector
+ dA.dsigma = -shape*zedd/sigma # matrix
+ pow = 1 + 1/shape
A1 = A[cbind(ii, r.vec)]
- AAr1 = dA.dmu/(xi * A1^pow) -
+ AAr1 = dA.dmu/(shape * A1^pow) -
pow * rowSums(cbind(dA.dmu/A), na.rm = TRUE)
- AAr2 = dA.dsigma[cbind(ii,r.vec)] / (xi * A1^pow) -
+ AAr2 = dA.dsigma[cbind(ii,r.vec)] / (shape * A1^pow) -
pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
- AAr3 = 1/(xi * A1^pow) -
+ AAr3 = 1/(shape * A1^pow) -
pow * rowSums(cbind(dA.dsigma/A), na.rm = TRUE)
dl.dmu = AAr1
dl.dsi = AAr2 - r.vec/sigma
- dl.dxi = rowSums(cbind(log(A)), na.rm = TRUE)/xi^2 -
+ dl.dxi = rowSums(cbind(log(A)), na.rm = TRUE)/shape^2 -
pow * rowSums(cbind(dA.dxi/A), na.rm = TRUE) -
- (log(A1) / xi^2 -
- dA.dxi[cbind(ii,r.vec)] / (xi*A1)) * A1^(-1/xi)
+ (log(A1) / shape^2 -
+ dA.dxi[cbind(ii,r.vec)] / (shape*A1)) * A1^(-1/shape)
if (any(is.zero)) {
zorro = c(zedd[cbind(1:n,r.vec)])
@@ -417,71 +524,79 @@ qgev <- function(p, location = 0, scale = 1, shape = 0) {
dl.dmu[is.zero] = (1-ezedd) / sigma[is.zero]
dl.dsi[is.zero] = (zorro * (1-ezedd) - 1) / sigma[is.zero]
dl.dxi[is.zero] = zorro * ((1 - ezedd) * zorro / 2 - 1)
- }
- dmu.deta = dtheta.deta(mmu, .llocation, earg = .elocation)
- dsi.deta = dtheta.deta(sigma, .lscale, earg = .escale)
- dxi.deta = dtheta.deta(xi, .lshape, earg = .eshape)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsi * dsi.deta,
- dl.dxi * dxi.deta)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0 ))),
- weight = eval(substitute(expression({
- kay = -xi
- dd = digamma(r.vec-kay+1)
- ddd = digamma(r.vec+1) # Unnecessarily evaluated at each iteration
- temp13 = -kay * dd + (kay^2 - kay + 1) / (1-kay)
- temp33 = 1 - 2 * kay * ddd +
- kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
- temp23 = -kay * dd + (1+(1-kay)^2) / (1-kay)
- GR.gev = function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
- tmp2 = (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
- tmp1 = (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
- k0 = (1-2*kay)
- k1 = k0 * kay
- k2 = k1 * kay
- k3 = k2 * kay # kay^3 * (1-2*kay)
- wz = matrix(as.numeric(NA), n, 6)
- wz[, iam(1, 1, M)] = tmp2 / (sigma^2 * k0)
- wz[, iam(1, 2, M)] = (tmp2 - tmp1) / (sigma^2 * k1)
- wz[, iam(1, 3, M)] = (tmp1 * temp13 - tmp2) / (sigma * k2)
- wz[, iam(2, 2, M)] = (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
- wz[, iam(2, 3, M)] = (r.vec*k1*ddd + tmp1 *
- temp23 - tmp2 - r.vec*k0) / (sigma * k3)
- wz[, iam(3, 3, M)] = (2*tmp1*(-temp13) + tmp2 +
- r.vec*k0*temp33)/(k3*kay)
+ }
- if (any(is.zero)) {
- if (ncol(y) > 1)
- stop("cannot handle xi == 0 with a multivariate response")
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsi * dsi.deta,
+ dl.dxi * dxi.deta)
+ }), list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
- EulerM = -digamma(1)
- wz[is.zero, iam(2, 2, M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[is.zero, iam(3, 3, M)] = 2.4236
- wz[is.zero, iam(1, 2, M)] = (digamma(2) +
- 2*(EulerM-1)) / sigma^2
- wz[is.zero, iam(1, 3, M)]= -(trigamma(1)/2 + digamma(1)*
- (digamma(1)/2+1))/sigma
- wz[is.zero, iam(2, 3, M)] = (-dgammadx(2,3)/6 + dgammadx(1, 1) +
- 2*dgammadx(1, 2) +
- 2*dgammadx(1,3)/3) / sigma
+ .tolshape0 = tolshape0 ))),
- if (FALSE ) {
+ weight = eval(substitute(expression({
+ kay = -shape
+ dd = digamma(r.vec-kay+1)
+ ddd = digamma(r.vec+1) # Unnecessarily evaluated at each iteration
+ temp13 = -kay * dd + (kay^2 - kay + 1) / (1-kay)
+ temp33 = 1 - 2 * kay * ddd +
+ kay^2 * (1 + trigamma(r.vec+1) + ddd^2)
+ temp23 = -kay * dd + (1+(1-kay)^2) / (1-kay)
+ GR.gev = function(j, ri, kay) gamma(ri - j*kay + 1) / gamma(ri)
+ tmp2 = (1-kay)^2 * GR.gev(2, r.vec, kay) # Latter is GR2
+ tmp1 = (1-2*kay) * GR.gev(1, r.vec, kay) # Latter is GR1
+ k0 = (1-2*kay)
+ k1 = k0 * kay
+ k2 = k1 * kay
+ k3 = k2 * kay # kay^3 * (1-2*kay)
+
+ wz = matrix(as.numeric(NA), n, 6)
+ wz[, iam(1, 1, M)] = tmp2 / (sigma^2 * k0)
+ wz[, iam(1, 2, M)] = (tmp2 - tmp1) / (sigma^2 * k1)
+ wz[, iam(1, 3, M)] = (tmp1 * temp13 - tmp2) / (sigma * k2)
+ wz[, iam(2, 2, M)] = (r.vec*k0 - 2*tmp1 + tmp2) / (sigma^2 * k2)
+ wz[, iam(2, 3, M)] = (r.vec*k1*ddd + tmp1 *
+ temp23 - tmp2 - r.vec*k0) / (sigma * k3)
+ wz[, iam(3, 3, M)] = (2*tmp1*(-temp13) + tmp2 +
+ r.vec*k0*temp33)/(k3*kay)
+
+ if (any(is.zero)) {
+ if (ncol(y) > 1)
+ stop("cannot handle shape == 0 with a multivariate response")
+
+ EulerM <- -digamma(1)
+ wz[is.zero, iam(2, 2, M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
+ wz[is.zero, iam(3, 3, M)] = 2.4236
+ wz[is.zero, iam(1, 2, M)] = (digamma(2) +
+ 2*(EulerM-1)) / sigma^2
+ wz[is.zero, iam(1, 3, M)]= -(trigamma(1)/2 + digamma(1)*
+ (digamma(1)/2+1))/sigma
+ wz[is.zero, iam(2, 3, M)] = (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
+ 2*dgammadx(1, 2) +
+ 2*dgammadx(1, 3)/3) / sigma
+
+ if (FALSE ) {
wz[, iam(1, 2, M)] = 2 * r.vec / sigma^2
wz[, iam(2, 2, M)] = -4 * r.vec * digamma(r.vec+1) + 2 * r.vec +
(4 * dgammadx(r.vec+1, deriv.arg = 1) -
3 * dgammadx(r.vec+1, deriv.arg = 2)) / gamma(r.vec) # Not checked
}
}
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dmu.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dsi.deta^2
- wz[, iam(3, 3, M)] = wz[, iam(3,3, M)] * dxi.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
- wz[, iam(1, 3, M)] = wz[, iam(1,3, M)] * dmu.deta * (-dxi.deta)
- wz[, iam(2, 3, M)] = wz[, iam(2,3, M)] * dsi.deta * (-dxi.deta)
- c(w) * wz
- }), list( .eshape = eshape ))))
+
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dmu.deta^2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dsi.deta^2
+ wz[, iam(3, 3, M)] = wz[, iam(3, 3, M)] * dxi.deta^2
+ wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
+ wz[, iam(1, 3, M)] = wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
+ wz[, iam(2, 3, M)] = wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
+ c(w) * wz
+ }), list(
+ .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape
+
+ ))))
+
}
@@ -514,11 +629,7 @@ dgammadx <- function(x, deriv.arg = 1) {
egev <- function(llocation = "identity",
lscale = "loge",
- lshape = "logoff",
- elocation = list(),
- escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+ lshape = logoff(offset = 0.5),
percentiles = c(95, 99),
iscale = NULL, ishape = NULL,
imethod = 1, gshape = c(-0.45, 0.45),
@@ -529,12 +640,23 @@ dgammadx <- function(x, deriv.arg = 1) {
stop("bad input for argument 'giveWarning'")
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation <- as.character(substitute(llocation))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape <- as.character(substitute(lshape))
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+
+
if (!is.Numeric(gshape, allowable.length = 2) ||
gshape[1] >= gshape[2])
stop("bad input for argument 'gshape'")
@@ -555,30 +677,37 @@ dgammadx <- function(x, deriv.arg = 1) {
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
new("vglmff",
blurb = c("Generalized extreme value distribution\n",
"Links: ",
- namesof("location", link = llocation, earg = elocation), ", ",
- namesof("scale", link = lscale, earg = escale), ", ",
- namesof("shape", link = lshape, earg = eshape)),
+ namesof("location", link = llocat, earg = elocat), ", ",
+ namesof("scale", link = lscale, earg = escale), ", ",
+ namesof("shape", link = lshape, earg = eshape)),
constraints=eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("location", .llocation , earg = .elocation , short = TRUE),
- namesof("scale", .lscale, earg = .escale, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE))
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , short = TRUE),
+ namesof("scale", .lscale , earg = .escale , short = TRUE),
+ namesof("shape", .lshape , earg = .eshape , short = TRUE))
+
+
+
+
+
if (ncol(as.matrix(y)) != 1)
- stop("response must be a vector or one-column matrix")
+ stop("response must be a vector or one-column matrix")
+
+
+
+
+
if (!length(etastart)) {
- init.sig = if (length( .iscale))
+ init.sig = if (length( .iscale ))
rep( .iscale, length.out = length(y)) else NULL
- init.xi = if (length( .ishape))
+ init.xi = if (length( .ishape ))
rep( .ishape, length.out = length(y)) else NULL
eshape = .eshape
if ( .lshape == "elogit" && length(init.xi) &&
@@ -605,9 +734,9 @@ dgammadx <- function(x, deriv.arg = 1) {
llTry = egev(giveWarning=
FALSE)@loglikelihood(mu = NULL, y = y, w = w,
residuals = FALSE,
- eta = cbind(theta2eta(muTry, .llocation, earg = .elocation),
- theta2eta(sigmaTry, .lscale, earg = .escale),
- theta2eta(xi.try, .lshape, earg = .eshape)))
+ eta = cbind(theta2eta(muTry, .llocat , earg = .elocat ),
+ theta2eta(sigmaTry, .lscale , earg = .escale ),
+ theta2eta(xi.try, .lshape , earg = .eshape )))
if (llTry >= objecFunction) {
if (est.sigma)
init.sig = sigmaTry
@@ -625,7 +754,7 @@ dgammadx <- function(x, deriv.arg = 1) {
if (!length(init.sig))
init.sig = rep(sqrt(6*var(y))/pi,
length.out = length(y))
- EulerM = -digamma(1)
+ EulerM <- -digamma(1)
init.mu = rep(median(y) - EulerM * init.sig,
length.out = length(y))
}
@@ -638,21 +767,21 @@ dgammadx <- function(x, deriv.arg = 1) {
extra$percentiles = .percentiles
- etastart =
- cbind(theta2eta(init.mu, .llocation, earg = .elocation),
- theta2eta(init.sig, .lscale, earg = .escale),
- theta2eta(init.xi, .lshape, earg = .eshape))
+ etastart <-
+ cbind(theta2eta(init.mu, .llocat , earg = .elocat ),
+ theta2eta(init.sig, .lscale , earg = .escale ),
+ theta2eta(init.xi, .lshape , earg = .eshape ))
}
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
.percentiles = percentiles, .tolshape0 = tolshape0,
- .elocation = elocation, .escale = escale, .eshape = eshape,
.imethod = imethod,
.giveWarning= giveWarning,
.iscale = iscale, .ishape = ishape, .gshape = gshape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- loc <- eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma <- eta2theta(eta[, 2], .lscale, earg = .escale)
- xi <- eta2theta(eta[,3], .lshape, earg = .eshape)
+ loc <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
is.zero <- (abs(xi) < .tolshape0)
cent = extra$percentiles
LP <- length(cent)
@@ -667,37 +796,37 @@ dgammadx <- function(x, deriv.arg = 1) {
dimnames(fv) = list(dimnames(eta)[[1]],
paste(as.character(cent), "%", sep = ""))
} else {
- EulerM = -digamma(1)
+ EulerM <- -digamma(1)
fv = loc + sigma * EulerM # When xi = 0, is Gumbel
fv[!is.zero] = loc[!is.zero] + sigma[!is.zero] *
(gamma(1-xi[!is.zero])-1) / xi[!is.zero]
fv[xi >= 1] = NA # Mean exists only if xi < 1.
}
fv
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
.tolshape0 = tolshape0 ))),
last = eval(substitute(expression({
- misc$links <- c(location = .llocation,
- scale = .lscale,
+ misc$links <- c(location = .llocat,
+ scale = .lscale ,
shape = .lshape)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles <- .percentiles
- misc$earg = list(location = .elocation,
+ misc$earg = list(location = .elocat,
scale = .escale,
shape = .eshape)
misc$tolshape0 = .tolshape0
misc$expected = TRUE
if (any(xi < -0.5))
warning("some values of the shape parameter are less than -0.5")
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
.tolshape0 = tolshape0, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mmu <- eta2theta(eta[, 1], .llocation, earg = .elocation )
- sigma <- eta2theta(eta[, 2], .lscale, earg = .escale )
- xi <- eta2theta(eta[,3], .lshape, earg = .eshape )
+ mmu <- eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi <- eta2theta(eta[, 3], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
@@ -706,16 +835,16 @@ dgammadx <- function(x, deriv.arg = 1) {
log = TRUE, oobounds.log = -1.0e04,
giveWarning= .giveWarning))
}
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
.giveWarning= giveWarning, .tolshape0 = tolshape0 ))),
vfamily = c("egev", "vextremes"),
deriv = eval(substitute(expression({
- mmu = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale )
- xi = eta2theta(eta[,3], .lshape, earg = .eshape)
+ Locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
+ xi = eta2theta(eta[, 3], .lshape , earg = .eshape)
is.zero <- (abs(xi) < .tolshape0)
- zedd = (y-mmu) / sigma
+ zedd = (y-Locat) / sigma
A = 1 + xi * zedd
dA.dxi = zedd
dA.dmu = -xi / sigma
@@ -737,14 +866,14 @@ dgammadx <- function(x, deriv.arg = 1) {
dl.dxi[is.zero] = zedd[is.zero] *
((1 - ezedd) * zedd[is.zero] / 2 - 1)
}
- dmu.deta = dtheta.deta(mmu, .llocation, earg = .elocation)
- dsi.deta = dtheta.deta(sigma, .lscale, earg = .escale )
- dxi.deta = dtheta.deta(xi, .lshape, earg = .eshape)
+ dmu.deta = dtheta.deta(Locat, .llocat , earg = .elocat )
+ dsi.deta = dtheta.deta(sigma, .lscale , earg = .escale )
+ dxi.deta = dtheta.deta(xi, .lshape , earg = .eshape)
c(w) * cbind(dl.dmu * dmu.deta,
dl.dsi * dsi.deta,
dl.dxi * dxi.deta)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
.tolshape0 = tolshape0 ))),
weight = eval(substitute(expression({
bad <- A <= 0
@@ -759,29 +888,29 @@ dgammadx <- function(x, deriv.arg = 1) {
wz = matrix(as.numeric(NA), n, 6)
wz[, iam(1, 1, M)] = pp / sigma^2
wz[, iam(2, 2, M)] = (1-2*temp100 + pp) / (sigma * kay)^2
- EulerM = -digamma(1)
- wz[, iam(3,3, M)] = (pi^2 / 6 + (1-EulerM-1/kay)^2 +
+ EulerM <- -digamma(1)
+ wz[, iam(3, 3, M)] = (pi^2 / 6 + (1-EulerM-1/kay)^2 +
(2*qq + pp/kay)/kay) / kay^2
wz[, iam(1, 2, M)] = (pp - temp100) / (sigma^2 * kay)
- wz[, iam(1,3, M)] = -(qq + pp/kay) / (sigma * kay)
- wz[, iam(2,3, M)] = (1-EulerM - (1-temp100)/kay - qq -
+ wz[, iam(1, 3, M)] = -(qq + pp/kay) / (sigma * kay)
+ wz[, iam(2, 3, M)] = (1-EulerM - (1-temp100)/kay - qq -
pp/kay) / (sigma * kay^2)
if (any(is.zero)) {
wz[is.zero, iam(2, 2, M)] = (pi^2/6 + (1-EulerM)^2) / sigma^2
- wz[is.zero, iam(3,3, M)] <- 2.4236
+ wz[is.zero, iam(3, 3, M)] <- 2.4236
wz[is.zero, iam(1, 2, M)] <- (digamma(2) + 2*(EulerM-1)) / sigma^2
- wz[is.zero, iam(1,3, M)] <- -(trigamma(1)/2 + digamma(1)*
+ wz[is.zero, iam(1, 3, M)] <- -(trigamma(1)/2 + digamma(1)*
(digamma(1)/2+1))/sigma
- wz[is.zero, iam(2,3, M)] <- (-dgammadx(2,3)/6 + dgammadx(1, 1) +
+ wz[is.zero, iam(2, 3, M)] <- (-dgammadx(2, 3)/6 + dgammadx(1, 1) +
2*dgammadx(1, 2) +
- 2*dgammadx(1,3)/3)/sigma
+ 2*dgammadx(1, 3)/3)/sigma
}
wz[, iam(1, 1, M)] <- wz[, iam(1, 1, M)] * dmu.deta^2
wz[, iam(2, 2, M)] <- wz[, iam(2, 2, M)] * dsi.deta^2
- wz[, iam(3,3, M)] <- wz[, iam(3,3, M)] * dxi.deta^2
+ wz[, iam(3, 3, M)] <- wz[, iam(3, 3, M)] * dxi.deta^2
wz[, iam(1, 2, M)] <- wz[, iam(1, 2, M)] * dmu.deta * dsi.deta
- wz[, iam(1,3, M)] <- wz[, iam(1,3, M)] * dmu.deta * (-dxi.deta)
- wz[, iam(2,3, M)] <- wz[, iam(2,3, M)] * dsi.deta * (-dxi.deta)
+ wz[, iam(1, 3, M)] <- wz[, iam(1, 3, M)] * dmu.deta * (-dxi.deta)
+ wz[, iam(2, 3, M)] <- wz[, iam(2, 3, M)] * dsi.deta * (-dxi.deta)
c(w) * wz
}), list( .eshape = eshape, .tolshape0 = tolshape0 ))))
}
@@ -791,91 +920,106 @@ dgammadx <- function(x, deriv.arg = 1) {
rgumbel <- function(n, location = 0, scale = 1) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
-
- answer = location - scale * log(-log(runif(use.n)))
- answer[scale <= 0] = NaN
- answer
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ answer <- location - scale * log(-log(runif(use.n)))
+ answer[scale <= 0] <- NaN
+ answer
}
dgumbel <- function(x, location = 0, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- zedd = (x - location) / scale
- logdensity = -zedd - exp(-zedd) - log(scale)
- if (log.arg) logdensity else exp(logdensity)
+ zedd = (x - location) / scale
+ logdensity = -zedd - exp(-zedd) - log(scale)
+ if (log.arg) logdensity else exp(logdensity)
}
qgumbel <- function(p, location = 0, scale = 1) {
- answer = location - scale * log(-log(p))
- answer[scale <= 0] = NaN
- answer[p < 0] = NaN
- answer[p > 1] = NaN
- answer[p == 0] = -Inf
- answer[p == 1] = Inf
- answer
+ answer <- location - scale * log(-log(p))
+ answer[scale <= 0] <- NaN
+ answer[p < 0] <- NaN
+ answer[p > 1] <- NaN
+ answer[p == 0] <- -Inf
+ answer[p == 1] <- Inf
+ answer
}
pgumbel <- function(q, location = 0, scale = 1) {
- answer = exp(-exp(-(q-location) / scale))
- answer[scale <= 0] = NaN
- answer
+ answer <- exp(-exp(-(q - location) / scale))
+ answer[scale <= 0] <- NaN
+ answer
}
gumbel <- function(llocation = "identity",
- lscale = "loge",
- elocation = list(),
- escale = list(),
- iscale = NULL,
- R=NA, percentiles = c(95,99),
- mpv = FALSE, zero = NULL)
+ lscale = "loge",
+ iscale = NULL,
+ R = NA, percentiles = c(95, 99),
+ mpv = FALSE, zero = NULL)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
+
if (length(percentiles) &&
(!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
+
new("vglmff",
blurb = c("Gumbel distribution for extreme value regression\n",
"Links: ",
- namesof("location", link = llocation, earg = elocation), ", ",
- namesof("scale", link = lscale, earg = escale )),
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale )),
constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, short = TRUE),
- namesof("scale", .lscale, earg = .escale , short = TRUE))
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , short = TRUE),
+ namesof("scale", .lscale , earg = .escale , short = TRUE))
+
+
y = as.matrix(y)
if (ncol(y) > 1)
y = -t(apply(-y, 1, sort, na.last = TRUE))
+
+
+
r.vec = rowSums(cbind(!is.na(y)))
if (any(r.vec == 0))
stop("There is at least one row of the response containing all NAs")
+
+
+
if (ncol(y) > 1) {
yiri = y[cbind(1:nrow(y), r.vec)]
sc.init = if (is.Numeric( .iscale, positive = TRUE))
@@ -887,7 +1031,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
sc.init = if (is.Numeric( .iscale, positive = TRUE))
.iscale else 1.1 * (0.01+sqrt(var(y)*6)) / pi
sc.init = rep(sc.init, length.out = n)
- EulerM = -digamma(1)
+ EulerM <- -digamma(1)
loc.init = (y - sc.init * EulerM)
loc.init[loc.init <= 0] = min(y)
}
@@ -897,96 +1041,106 @@ pgumbel <- function(q, location = 0, scale = 1) {
extra$percentiles = .percentiles
if (!length(etastart))
- etastart =
- cbind(theta2eta(loc.init, .llocation, earg = .elocation),
- theta2eta(sc.init, .lscale, earg = .escale ))
-}), list( .llocation = llocation, .lscale = lscale, .iscale = iscale,
- .elocation = elocation, .escale = escale,
- .R = R, .mpv = mpv, .percentiles = percentiles ))),
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta( sc.init, .lscale , earg = .escale ))
+ }), list( .llocat = llocat, .lscale = lscale, .iscale = iscale,
+ .elocat = elocat, .escale = escale,
+ .R = R, .mpv = mpv, .percentiles = percentiles ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale ) # sigma
- Percentiles = extra$percentiles
- LP = length(Percentiles) # may be 0
- if (LP > 0) {
- mpv = extra$mpv
- mu = matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
- Rvec = extra$R
- for(ii in 1:LP) {
- ci = if (is.Numeric(Rvec))
- Rvec * (1 - Percentiles[ii] / 100) else
- -log(Percentiles[ii] / 100)
- mu[,ii] = loc - sigma * log(ci)
- }
- if (mpv)
- mu[,ncol(mu)] = loc - sigma * log(log(2))
- dmn2 = paste(as.character(Percentiles), "%", sep = "")
+ loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sigma = eta2theta(eta[, 2], .lscale , earg = .escale ) # sigma
+
+ Percentiles = extra$percentiles
+ LP = length(Percentiles) # may be 0
+ if (LP > 0) {
+ mpv = extra$mpv
+ mu = matrix(as.numeric(NA), nrow(eta), LP + mpv) # LP may be 0
+ Rvec = extra$R
+ for(ii in 1:LP) {
+ ci = if (is.Numeric(Rvec))
+ Rvec * (1 - Percentiles[ii] / 100) else
+ -log(Percentiles[ii] / 100)
+ mu[,ii] = loc - sigma * log(ci)
+ }
if (mpv)
- dmn2 = c(dmn2, "MPV")
- dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
- } else {
- EulerM = -digamma(1)
- mu = loc + sigma * EulerM
- }
- mu
- }, list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ mu[,ncol(mu)] = loc - sigma * log(log(2))
+ dmn2 = paste(as.character(Percentiles), "%", sep = "")
+ if (mpv)
+ dmn2 = c(dmn2, "MPV")
+ dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
+ } else {
+ EulerM <- -digamma(1)
+ mu = loc + sigma * EulerM
+ }
+ mu
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
+
last = eval(substitute(expression({
+ misc$links = c(location = .llocat, scale = .lscale)
+
+ misc$earg = list(location= .elocat, scale= .escale )
+
misc$R = .R
- misc$links = c(location = .llocation, scale = .lscale)
- misc$earg = list(location= .elocation, scale= .escale )
misc$mpv = .mpv
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$percentiles = .percentiles
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale,
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
.percentiles = percentiles,
.mpv = mpv, .R = R ))),
vfamily = c("gumbel", "vextremes"),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale )
- r.vec = rowSums(cbind(!is.na(y)))
- yiri = y[cbind(1:nrow(y),r.vec)]
- ans = -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
- max.r.vec = max(r.vec)
+ loc <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ sigma <- eta2theta(eta[, 2], .lscale , earg = .escale )
+
+ r.vec <- rowSums(cbind(!is.na(y)))
+ yiri <- y[cbind(1:nrow(y),r.vec)]
+ ans <- -r.vec * log(sigma) - exp( -(yiri-loc)/sigma )
+ max.r.vec <- max(r.vec)
for(jay in 1:max.r.vec) {
- index = (jay <= r.vec)
- ans[index] = ans[index] - (y[index,jay]-loc[index]) / sigma[index]
+ index <- (jay <= r.vec)
+ ans[index] <- ans[index] - (y[index,jay]-loc[index]) / sigma[index]
}
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * ans)
+ sum(c(w) * ans)
}
- }, list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
deriv = eval(substitute(expression({
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale )
+ loc = eta2theta(eta[, 1], .llocat, earg = .elocat)
+ sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
+
r.vec = rowSums(cbind(!is.na(y)))
yiri = y[cbind(1:nrow(y),r.vec)]
yi.bar = rowMeans(y, na.rm = TRUE)
temp2 = (yiri - loc) / sigma
term2 = exp(-temp2)
- dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation)
- dsigma.deta = dtheta.deta(sigma, .lscale, earg = .escale )
+
+ dloc.deta = dtheta.deta(loc, .llocat, earg = .elocat)
+ dsigma.deta = dtheta.deta(sigma, .lscale , earg = .escale )
+
dl.dloc = (r.vec - term2) / sigma
dl.dsigma = (rowSums((y - loc) / sigma, na.rm = TRUE) - r.vec -
temp2 * term2) / sigma
- c(w) * cbind(dl.dloc * dloc.deta,
+
+ c(w) * cbind(dl.dloc * dloc.deta,
dl.dsigma * dsigma.deta)
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
weight = eval(substitute(expression({
- temp6 = digamma(r.vec) # , integer=T
+ temp6 = digamma(r.vec) # , integer = T
temp5 = digamma(1:max(r.vec)) # , integer=T
temp5 = matrix(temp5, n, max(r.vec), byrow = TRUE)
temp5[col(temp5) > r.vec] = 0
temp5 = temp5 %*% rep(1, ncol(temp5))
+
wz = matrix(as.numeric(NA), n, dimm(M = 2)) # 3=dimm(M = 2)
wz[, iam(1, 1, M)] = r.vec / sigma^2
wz[, iam(2, 1, M)] = -(1 + r.vec * temp6) / sigma^2
@@ -995,6 +1149,7 @@ pgumbel <- function(q, location = 0, scale = 1) {
wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dloc.deta^2
wz[, iam(2, 1, M)] = wz[, iam(2, 1, M)] * dsigma.deta * dloc.deta
wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dsigma.deta^2
+
c(w) * wz
}), list( .lscale = lscale ))))
}
@@ -1002,29 +1157,34 @@ pgumbel <- function(q, location = 0, scale = 1) {
rgpd <- function(n, location = 0, scale = 1, shape = 0) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
+ use.n <- if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
if (!is.Numeric(location))
stop("bad input for argument 'location'")
if (!is.Numeric(shape))
stop("bad input for argument 'shape'")
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n);
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n)
- scase = abs(shape) < sqrt(.Machine$double.eps)
- nscase = sum(scase)
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
+
+
+ scase <- abs(shape) < sqrt(.Machine$double.eps)
+ nscase <- sum(scase)
if (use.n - nscase)
- ans[!scase] = location[!scase] +
- scale[!scase] *
+ ans[!scase] <- location[!scase] +
+ scale[!scase] *
((runif(use.n - nscase))^(-shape[!scase])-1) / shape[!scase]
if (nscase)
- ans[scase] = location[scase] - scale[scase] * log(runif(nscase))
- ans[scale <= 0] = NaN
+ ans[scase] <- location[scase] - scale[scase] * log(runif(nscase))
+ ans[scale <= 0] <- NaN
ans
}
@@ -1033,53 +1193,65 @@ rgpd <- function(n, location = 0, scale = 1, shape = 0) {
dgpd <- function(x, location = 0, scale = 1, shape = 0, log = FALSE,
tolshape0 = sqrt(.Machine$double.eps),
oobounds.log = -Inf, giveWarning = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- if (oobounds.log > 0)
- stop("bad input for argument 'oobounds.log'")
-
- if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'tolshape0'")
- L = max(length(x), length(location), length(scale), length(shape))
- shape = rep(shape, length.out = L);
- location = rep(location, length.out = L);
- scale = rep(scale, length.out = L);
- x = rep(x, length.out = L)
-
- logdensity = rep(log(0), length.out = L)
- scase = abs(shape) < tolshape0
- nscase = sum(scase)
- if (L - nscase) {
- zedd = (x-location) / scale
- xok = (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
- logdensity[xok] = -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
- log(scale[xok])
- outofbounds = (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
- if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
- }
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if (oobounds.log > 0)
+ stop("bad input for argument 'oobounds.log'")
+
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'tolshape0'")
+
+
+ L = max(length(x), length(location), length(scale), length(shape))
+ if (length(shape) != L)
+ shape <- rep(shape, length.out = L)
+ if (length(location) != L)
+ location <- rep(location, length.out = L);
+ if (length(scale) != L)
+ scale <- rep(scale, length.out = L)
+ if (length(x) != L)
+ x <- rep(x, length.out = L)
+
+
+
+
+
+
+ logdensity = rep(log(0), length.out = L)
+ scase = abs(shape) < tolshape0
+ nscase = sum(scase)
+ if (L - nscase) {
+ zedd = (x-location) / scale
+ xok = (!scase) & (zedd > 0) & (1 + shape*zedd > 0)
+ logdensity[xok] = -(1 + 1/shape[xok])*log1p(shape[xok]*zedd[xok]) -
+ log(scale[xok])
+ outofbounds = (!scase) & ((zedd <= 0) | (1 + shape*zedd <= 0))
+ if (any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if (giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
- if (nscase) {
- xok = scase & (x > location)
- logdensity[xok] = -(x[xok] - location[xok]) / scale[xok] -
- log(scale[xok])
- outofbounds = scase & (x <= location)
- if (any(outofbounds)) {
- logdensity[outofbounds] = oobounds.log
- no.oob = sum(outofbounds)
- if (giveWarning)
- warning(no.oob, " observation",
- ifelse(no.oob > 1, "s are", " is"), " out of bounds")
- }
+ }
+ if (nscase) {
+ xok = scase & (x > location)
+ logdensity[xok] = -(x[xok] - location[xok]) / scale[xok] -
+ log(scale[xok])
+ outofbounds = scase & (x <= location)
+ if (any(outofbounds)) {
+ logdensity[outofbounds] = oobounds.log
+ no.oob = sum(outofbounds)
+ if (giveWarning)
+ warning(no.oob, " observation",
+ ifelse(no.oob > 1, "s are", " is"), " out of bounds")
}
+ }
- logdensity[scale <= 0] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ logdensity[scale <= 0] = NaN
+ if (log.arg) logdensity else exp(logdensity)
}
@@ -1093,17 +1265,24 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
stop("bad input for argument 'shape'")
use.n = max(length(q), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n);
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n);
- q = rep(q-location, length.out = use.n)
+
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
+ if (length(q) != use.n)
+ q <- rep(q - location, length.out = use.n)
+
+
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
if (use.n - nscase) {
q[q < 0] = 0
- ans = 1 - pmax(0, (1 + shape*q/scale))^(-1/shape)
+ ans <- 1 - pmax(0, (1 + shape*q/scale))^(-1/shape)
}
if (nscase) {
pos = q >= 0
@@ -1116,14 +1295,22 @@ pgpd <- function(q, location = 0, scale = 1, shape = 0) {
ans
}
+
qgpd <- function(p, location = 0, scale = 1, shape = 0) {
- use.n = max(length(p), length(location), length(scale), length(shape))
- ans = numeric(use.n)
- shape = rep(shape, length.out = use.n);
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n);
- p = rep(p, length.out = use.n)
+ use.n = max(length(p), length(location), length(scale), length(shape))
+
+ ans <- numeric(use.n)
+ if (length(shape) != use.n)
+ shape <- rep(shape, length.out = use.n)
+ if (length(location) != use.n)
+ location <- rep(location, length.out = use.n);
+ if (length(scale) != use.n)
+ scale <- rep(scale, length.out = use.n)
+ if (length(p) != use.n)
+ p <- rep(p, length.out = use.n)
+
+
scase = abs(shape) < sqrt(.Machine$double.eps)
nscase = sum(scase)
@@ -1149,189 +1336,346 @@ qgpd <- function(p, location = 0, scale = 1, shape = 0) {
-
gpd <- function(threshold = 0,
lscale = "loge",
- lshape = "logoff",
- escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else NULL,
- percentiles = c(90,95),
+ lshape = logoff(offset = 0.5),
+ percentiles = c(90, 95),
iscale = NULL,
ishape = NULL,
tolshape0 = 0.001, giveWarning = TRUE,
imethod = 1,
- zero = 2) {
- if (!is.logical(giveWarning) || length(giveWarning) != 1)
- stop("bad input for argument 'giveWarning'")
- if (!is.Numeric(threshold))
- stop("bad input for argument 'threshold'")
- if (!is.Numeric(imethod, allowable.length = 1,
- positive = TRUE, integer.valued = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
+ zero = -2) {
+ if (!is.logical(giveWarning) || length(giveWarning) != 1)
+ stop("bad input for argument 'giveWarning'")
+ if (!is.Numeric(threshold))
+ stop("bad input for argument 'threshold'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+ if (length(percentiles) &&
+ (!is.Numeric(percentiles, positive = TRUE) ||
+ max(percentiles) >= 100))
+ stop("bad input for argument 'percentiles'")
+ if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
+ tolshape0 > 0.1)
+ stop("bad input for argument 'tolshape0'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Generalized Pareto distribution\n",
+ "Links: ",
+ namesof("scale", link = lscale, earg = escale ), ", ",
+ namesof("shape", link = lshape, earg = eshape )),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
+
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+ y.names <- dimnames(y)[[2]]
+ if (length(y.names) != ncoly)
+ y.names <- paste("Y", 1:ncoly, sep = "")
+ extra$y.names <- y.names
+
+
+
+ Threshold <- if (is.Numeric( .threshold )) .threshold else 0
+ Threshold <- matrix(Threshold, n, ncoly, byrow = TRUE)
+ if (is.Numeric( .threshold )) {
+ orig.y <- y
+ }
+ ystar <- as.matrix(y - Threshold) # Operate on ystar
+ extra$threshold <- Threshold
+
+
+ mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lscale , earg = .escale , tag = FALSE),
+ namesof(mynames2, .lshape , earg = .eshape , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+
+ if (!length(etastart)) {
+ meany <- colSums(ystar * w) / colSums(w)
+ vary <- apply(ystar, 2, var)
+ mediany <- apply(ystar, 2, median)
+
+
+ init.xii <- if (length( .ishape )) .ishape else {
+ if ( .imethod == 1)
+ -0.5 * (meany^2 / vary - 1) else
+ 0.5 * (1 - mediany^2 / vary)
+ }
+ init.sig <- if (length( .iscale )) .iscale else {
+ if (.imethod == 1)
+ 0.5 * meany * (meany^2 / vary + 1) else
+ abs(1 - init.xii) * mediany
+ }
+
+
+ init.xii <- matrix(init.xii, n, ncoly, byrow = TRUE)
+ init.sig <- matrix(init.sig, n, ncoly, byrow = TRUE)
+
+
+ init.sig[init.sig <= 0.0] <- 0.01 # sigma > 0
+ init.xii[init.xii <= -0.5] <- -0.40 # Fisher scoring works if xi > -0.5
+ init.xii[init.xii >= 1.0] <- 0.90 # Mean/var exists if xi < 1 / 0.5
+ if ( .lshape == "loge")
+ init.xii[init.xii <= 0.0] <- 0.05
+
+
+
+ etastart <-
+ cbind(theta2eta(init.sig, .lscale , earg = .escale ),
+ theta2eta(init.xii, .lshape , earg = .eshape ))[,
+ interleave.VGAM(M, M = Musual)]
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .iscale = iscale, .ishape = ishape,
+ .escale = escale, .eshape = eshape,
+ .percentiles = percentiles,
+ .threshold = threshold,
+ .imethod = imethod ))),
+
+
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
+ shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+ if (!is.matrix(sigma))
+ sigma <- as.matrix(sigma)
+ if (!is.matrix(shape))
+ shape <- as.matrix(shape)
+
+
+ Musual <- 2
+ pcent <- .percentiles
+ LP <- length(pcent) # NULL means LP == 0 and the mean is returned
+ ncoly <- ncol(eta) / Musual
+ if (!length(y.names <- extra$y.names))
+ y.names <- paste("Y", 1:ncoly, sep = "")
+
+ Threshold <- extra$threshold
+
+
+
+ if (LP) {
+
+
+
+
+ do.one <- function(yvec, shape, scale,
+ threshold,
+ percentiles = c(90, 95),
+ y.name = NULL,
+ tolshape0 = 0.001) {
+ is.zero <- (abs(shape) < tolshape0 ) # A matrix
+
+ LP = length(percentiles)
+ fv = matrix(as.numeric(NA), length(shape), LP)
+ is.zero = (abs(shape) < tolshape0)
+ for(ii in 1:LP) {
+ temp = 1 - percentiles[ii] / 100
+ fv[!is.zero, ii] = threshold[!is.zero] +
+ (temp^(-shape[!is.zero]) - 1) *
+ scale[!is.zero] / shape[!is.zero]
+ fv[ is.zero, ii] = threshold[is.zero] - scale[is.zero] * log(temp)
+ }
+
+ post.name <- paste(as.character(percentiles), "%", sep = "")
+
+ dimnames(fv) <-
+ list(dimnames(shape)[[1]],
+ if (is.null(y.name))
+ post.name else
+ paste(y.name, post.name, sep = " "))
+ fv
+ }
+
+
+
+
+ fv <- matrix(-1, nrow(sigma), LP * ncoly)
+ colnames.cumsum.fv <- NULL
+ for(jlocal in 1:ncoly) {
+ block.mat.fv <-
+ do.one(yvec = y[, jlocal],
+ shape = shape[, jlocal],
+ scale = sigma[, jlocal],
+ threshold = Threshold[, jlocal],
+ percentiles = pcent,
+ y.name = if (ncoly > 1) y.names[jlocal] else NULL,
+ tolshape0 = .tolshape0 )
+ fv[, (jlocal - 1) * LP + (1:LP)] <- block.mat.fv
+ colnames.cumsum.fv <- c(colnames.cumsum.fv,
+ colnames(block.mat.fv))
+ }
+ colnames(fv) <- colnames.cumsum.fv
+ } else {
+ fv <- Threshold + sigma / (1 - shape)
+ fv[shape >= 1] <- NA # Mean exists only if shape < 1.
+ dimnames(fv) <- list(dimnames(eta)[[1]], y.names)
+ }
+
+ fv
+ }, list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .threshold = threshold,
+ .tolshape0 = tolshape0,
+ .percentiles = percentiles ))),
+
+
+
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lscale , length = ncoly),
+ rep( .lshape , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .escale
+ misc$earg[[Musual*ii ]] <- .eshape
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+
+ misc$true.mu <- FALSE # @fitted is not a true mu
+ misc$percentiles <- .percentiles
+ misc$tolshape0 <- .tolshape0
+ if (any(Shape < -0.5))
+ warning("some values of the shape parameter are less than -0.5")
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .threshold = threshold,
+ .tolshape0 = tolshape0, .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
+ Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+ Threshold <- extra$threshold
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dgpd(x = y, location = Threshold, scale = sigma,
+ shape = Shape, tolshape0 = .tolshape0,
+ giveWarning = .giveWarning,
+ log = TRUE, oobounds.log = -1.0e04))
+ }
+ }, list( .tolshape0 = tolshape0, .giveWarning= giveWarning,
+ .escale = escale, .eshape = eshape,
+ .lscale = lscale, .lshape = lshape ))),
+ vfamily = c("gpd", "vextremes"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ sigma <- eta2theta(eta[, c(TRUE, FALSE)], .lscale , earg = .escale )
+ Shape <- eta2theta(eta[, c(FALSE, TRUE)], .lshape , earg = .eshape )
+
+ Threshold <- extra$threshold
+ ystar <- y - Threshold # Operate on ystar
+ A <- 1 + Shape * ystar / sigma
+
+ mytolerance <- .Machine$double.eps
+ bad <- (A <= mytolerance)
+ if (any(bad) && any(w[bad] != 0)) {
+ cat(sum(w[bad],na.rm = TRUE), # "; ignoring them"
+ "observations violating boundary constraints\n")
+ flush.console()
+ }
+ if (any(is.zero <- (abs(Shape) < .tolshape0))) {
+ }
+ igpd <- !is.zero & !bad
+ iexp <- is.zero & !bad
+
+ dl.dShape <- dl.dsigma <- rep(0, length.out = length(y))
+ dl.dsigma[igpd] <- ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
+ Shape[igpd]*ystar[igpd]) - 1) / sigma[igpd]
+
+ dl.dShape[igpd] <- log(A[igpd])/Shape[igpd]^2 - (1 + 1/Shape[igpd]) *
+ ystar[igpd] / (A[igpd] * sigma[igpd])
+ dl.dShape[iexp] <- ystar[iexp] *
+ (0.5*ystar[iexp]/sigma[iexp] - 1) / sigma[iexp]
+
+ dsigma.deta <- dtheta.deta(sigma, .lscale , earg = .escale )
+ dShape.deta <- dtheta.deta(Shape, .lshape , earg = .eshape )
+
+ myderiv <-
+ c(w) * cbind(dl.dsigma * dsigma.deta,
+ dl.dShape * dShape.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .tolshape0 = tolshape0,
+ .lscale = lscale, .escale = escale,
+ .lshape = lshape, .eshape = eshape ))),
+ weight = eval(substitute(expression({
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length(percentiles) &&
- (!is.Numeric(percentiles, positive = TRUE) ||
- max(percentiles) >= 100))
- stop("bad input for argument 'percentiles'")
- if (!is.Numeric(tolshape0, allowable.length = 1, positive = TRUE) ||
- tolshape0 > 0.1)
- stop("bad input for argument 'tolshape0'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ ned2l.dscale2 <- 1 / ((1+2*Shape) * sigma^2)
+ ned2l.dshape2 <- 2 / ((1+2*Shape) * (1+Shape))
+ ned2l.dshapescale <- 1 / ((1+2*Shape) * (1+Shape) * sigma) # > 0 !
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
+ NOS <- M / Musual
+ wz <- matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
- new("vglmff",
- blurb = c("Generalized Pareto distribution\n",
- "Links: ",
- namesof("scale", link = lscale, earg = escale ), ", ",
- namesof("shape", link = lshape, earg = eshape)),
- constraints=eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(as.matrix(y)) != 1)
- stop("response must be a vector or one-column matrix")
- Threshold = if (is.Numeric( .threshold)) .threshold else 0
- if (is.Numeric( .threshold)) {
- orig.y = y
- }
- ystar = y - Threshold # Operate on ystar
- extra$threshold = Threshold
- predictors.names=
- c(namesof("scale", .lscale, earg = .escale, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE ))
- if (!length(etastart)) {
- meany = mean(ystar)
- vary = var(ystar)
- init.xi = if (length( .ishape)) .ishape else {
- if ( .imethod == 1) -0.5*(meany^2/vary - 1) else
- 0.5 * (1 - median(ystar)^2 / vary)
- }
- init.sig = if (length( .iscale)) .iscale else {
- if (.imethod == 1) 0.5*meany*(meany^2/vary + 1) else
- abs(1-init.xi) * median(ystar)
- }
- init.sig[init.sig <= 0] = 0.01 # sigma > 0
- init.xi[init.xi <= -0.5] = -0.40 # Fisher scoring works if xi > -0.5
- init.xi[init.xi >= 1.0] = 0.90 # Mean/var exists if xi < 1 / 0.5
- if ( .lshape == "loge") init.xi[init.xi <= 0.0] = 0.05
- init.sig = rep(init.sig, leng=length(y))
- init.xi = rep(init.xi, leng=length(y))
-
- etastart = cbind(theta2eta(init.sig, .lscale, earg = .escale ),
- theta2eta(init.xi, .lshape, earg = .eshape ))
- }
- }), list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
- .iscale = iscale, .ishape = ishape,
- .escale = escale, .eshape = eshape,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- sigma = eta2theta(eta[, 1], .lscale, earg = .escale )
- Shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
- cent = .percentiles
- LP = length(cent) # NULL means LP == 0 and the mean is returned
- Threshold = if (is.Numeric( .threshold)) .threshold else 0
- if (LP) {
- fv = matrix(as.numeric(NA), nrow(eta), LP)
- is.zero = (abs(Shape) < .tolshape0)
- for(ii in 1:LP) {
- temp = 1-cent[ii]/100
- fv[!is.zero,ii] = Threshold + (temp^(-Shape[!is.zero])-1) *
- sigma[!is.zero] / Shape[!is.zero]
- fv[ is.zero,ii] = Threshold - sigma[is.zero] * log(temp)
- }
- dimnames(fv) = list(dimnames(eta)[[1]],
- paste(as.character(.percentiles), "%",
- sep = ""))
- } else {
- fv = Threshold + sigma / (1 - Shape) # This is the mean, E(Y)
- fv[Shape >= 1] = NA # Mean exists only if Shape < 1.
- }
- fv
- }, list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
- .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0, .percentiles = percentiles ))),
- last = eval(substitute(expression({
- misc$links = c(scale = .lscale, shape = .lshape)
- misc$true.mu = FALSE # @fitted is not a true mu
- misc$earg = list(scale= .escale , shape= .eshape )
- misc$percentiles = .percentiles
- misc$threshold = if (is.Numeric( .threshold)) .threshold else 0
- misc$expected = TRUE
- misc$tolshape0 = .tolshape0
- if (any(Shape < -0.5))
- warning("some values of the shape parameter are less than -0.5")
- }), list( .lscale = lscale, .lshape = lshape, .threshold=threshold,
- .escale = escale, .eshape = eshape,
- .tolshape0 = tolshape0, .percentiles = percentiles ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sigma = eta2theta(eta[, 1], .lscale, earg = .escale )
- Shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
- Threshold = extra$threshold
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgpd(x = y, location = Threshold, scale = sigma,
- shape = Shape, tolshape0 = .tolshape0,
- giveWarning = .giveWarning,
- log = TRUE, oobounds.log = -1.0e04))
- }
- }, list( .tolshape0 = tolshape0, .giveWarning= giveWarning,
- .escale = escale, .eshape = eshape,
- .lscale = lscale, .lshape = lshape ))),
- vfamily = c("gpd", "vextremes"),
- deriv = eval(substitute(expression({
- sigma = eta2theta(eta[, 1], .lscale, earg = .escale )
- Shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
- Threshold = extra$threshold
- ystar = y - Threshold # Operate on ystar
- A = 1 + Shape*ystar/sigma
- mytolerance = .Machine$double.eps
- bad <- (A <= mytolerance)
- if (any(bad) && any(w[bad] != 0)) {
- cat(sum(w[bad],na.rm = TRUE), # "; ignoring them"
- "observations violating boundary constraints\n")
- flush.console()
- }
- if (any(is.zero <- (abs(Shape) < .tolshape0))) {
- }
- igpd = !is.zero & !bad
- iexp = is.zero & !bad
- dl.dShape = dl.dsigma = rep(0, length.out = length(y))
- dl.dsigma[igpd] = ((1 + Shape[igpd]) * ystar[igpd] / (sigma[igpd] +
- Shape[igpd]*ystar[igpd]) - 1) / sigma[igpd]
- dl.dShape[igpd] = log(A[igpd])/Shape[igpd]^2 - (1 + 1/Shape[igpd]) *
- ystar[igpd] / (A[igpd] * sigma[igpd])
- dl.dShape[iexp] = ystar[iexp] *
- (0.5*ystar[iexp]/sigma[iexp] - 1) / sigma[iexp]
- dsigma.deta = dtheta.deta(sigma, .lscale, earg = .escale )
- dShape.deta = dtheta.deta(Shape, .lshape, earg = .eshape )
- c(w) * cbind(dl.dsigma * dsigma.deta,
- dl.dShape * dShape.deta)
- }), list( .tolshape0 = tolshape0,
- .lscale = lscale, .escale = escale,
- .lshape = lshape, .eshape = eshape ))),
- weight = eval(substitute(expression({
- n <- length(w) # needed!
- wz = matrix(as.numeric(NA), n, 3)
- wz[, iam(1, 1, M)] = 1 / ((1+2*Shape) * sigma^2)
- wz[, iam(2, 2, M)] = 2 / ((1+2*Shape) * (1+Shape))
- wz[, iam(1, 2, M)] = 1 / ((1+2*Shape) * (1+Shape) * sigma) # > 0 !
- wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] * dsigma.deta^2
- wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] * dShape.deta^2
- wz[, iam(1, 2, M)] = wz[, iam(1, 2, M)] * dsigma.deta * dShape.deta
- c(w) * wz
- }), list( .lscale = lscale ))))
+ ind11 <- ind22 <- ind12 <- NULL
+ for (ii in 1:(M / Musual)) {
+ ind11 <- c(ind11, iam(Musual*ii - 1, Musual*ii - 1, M))
+ ind22 <- c(ind22, iam(Musual*ii - 0, Musual*ii - 0, M))
+ ind12 <- c(ind12, iam(Musual*ii - 1, Musual*ii - 0, M))
+ }
+ wz[, ind11] <- ned2l.dscale2 * dsigma.deta^2
+ wz[, ind22] <- ned2l.dshape2 * dShape.deta^2
+ wz[, ind12] <- ned2l.dshapescale * dsigma.deta * dShape.deta
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
+ }), list( .lscale = lscale ))))
}
@@ -1347,36 +1691,37 @@ meplot.default <- function(y, main = "Mean Excess Plot",
conf = 0.95, col = c("blue", "black", "blue"), type = "l", ...) {
- if (!is.Numeric(y))
- stop("bad input for argument 'y'")
+ if (!is.Numeric(y))
+ stop("bad input for argument 'y'")
- n = length(y)
- sy = sort(y)
- dsy = rev(sy) # decreasing sequence
- me = rev(cumsum(dsy)) / (n:1) - sy
- me2 = rev(cumsum(dsy^2))
- var = (me2 - (n:1) * (me+sy)^2) / (n:1)
- ci = qnorm((1+conf)/2) * sqrt(abs(var)) / sqrt(n:1)
-
- ci[length(ci)] = NA
-
- mymat = cbind(me - ci, me, me + ci)
- sy = sy - sqrt( .Machine$double.eps )
-
- matplot(sy, mymat, main = main,
- xlab = xlab, ylab = ylab,
- lty = lty, col = col, type = type, ...)
- invisible(list(threshold = sy, meanExcess = me,
- plusminus = ci))
+ n = length(y)
+ sy = sort(y)
+ dsy = rev(sy) # decreasing sequence
+ me = rev(cumsum(dsy)) / (n:1) - sy
+ me2 = rev(cumsum(dsy^2))
+ var = (me2 - (n:1) * (me+sy)^2) / (n:1)
+ ci = qnorm((1+conf)/2) * sqrt(abs(var)) / sqrt(n:1)
+
+ ci[length(ci)] = NA
+
+ mymat = cbind(me - ci, me, me + ci)
+ sy = sy - sqrt( .Machine$double.eps )
+
+ matplot(sy, mymat, main = main,
+ xlab = xlab, ylab = ylab,
+ lty = lty, col = col, type = type, ...)
+ invisible(list(threshold = sy,
+ meanExcess = me,
+ plusminus = ci))
}
meplot.vlm <- function(object, ...) {
- if (!length(y <- object at y))
- stop("y slot is empty")
- ans = meplot(as.numeric(y), ...)
- invisible(ans)
+ if (!length(y <- object at y))
+ stop("y slot is empty")
+ ans <- meplot(as.numeric(y), ...)
+ invisible(ans)
}
@@ -1398,8 +1743,10 @@ setMethod("meplot", "vlm",
-guplot.default <- function(y, main = "Gumbel Plot",
- xlab = "Reduced data", ylab = "Observed data", type = "p", ...) {
+guplot.default <-
+ function(y, main = "Gumbel Plot",
+ xlab = "Reduced data",
+ ylab = "Observed data", type = "p", ...) {
if (!is.Numeric(y))
stop("bad input for argument 'y'")
@@ -1417,7 +1764,7 @@ guplot.default <- function(y, main = "Gumbel Plot",
guplot.vlm <- function(object, ...) {
if (!length(y <- object at y))
stop("y slot is empty")
- ans = guplot(as.numeric(y), ...)
+ ans <- guplot(as.numeric(y), ...)
invisible(ans)
}
@@ -1444,37 +1791,40 @@ setMethod("guplot", "vlm",
egumbel <- function(llocation = "identity",
- lscale = "loge",
- elocation = list(),
- escale = list(),
- iscale = NULL,
- R=NA, percentiles = c(95,99),
- mpv = FALSE, zero = NULL)
+ lscale = "loge",
+ iscale = NULL,
+ R = NA, percentiles = c(95, 99),
+ mpv = FALSE, zero = NULL)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
if (!is.logical(mpv) || length(mpv) != 1)
stop("bad input for argument 'mpv'")
if (length(percentiles) &&
(!is.Numeric(percentiles, positive = TRUE) ||
max(percentiles) >= 100))
stop("bad input for argument 'percentiles'")
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
+
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
-
new("vglmff",
blurb = c("Gumbel distribution (univariate response)\n\n",
"Links: ",
- namesof("location", llocation,
- earg = elocation, tag = TRUE), ", ",
+ namesof("location", llocat,
+ earg = elocat, tag = TRUE), ", ",
namesof("scale", lscale, earg = escale , tag = TRUE), "\n",
"Mean: location + scale*0.5772..\n",
"Variance: pi^2 * scale^2 / 6"),
@@ -1487,96 +1837,106 @@ setMethod("guplot", "vlm",
stop("Use gumbel() to handle multivariate responses")
if (min(y) <= 0)
stop("all response values must be positive")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale , tag = FALSE))
+
+
+
+
+
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
extra$R = .R
extra$mpv = .mpv
extra$percentiles = .percentiles
if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, positive = TRUE))
- .iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
- sc.init = rep(sc.init, length.out = n)
- EulerM = -digamma(1)
- loc.init = (y - sc.init * EulerM)
- etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
- theta2eta(sc.init, .lscale, earg = .escale ))
+ sca.init = if (is.Numeric( .iscale, positive = TRUE))
+ .iscale else 1.5 * (0.01+sqrt(var(y)*6)) / pi
+ sca.init = rep(sca.init, length.out = n)
+ EulerM <- -digamma(1)
+ loc.init = (y - sca.init * EulerM)
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(sca.init, .lscale , earg = .escale ))
}
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale,
- .iscale = iscale,
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .iscale = iscale,
.R = R, .mpv = mpv, .percentiles = percentiles ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sigma = eta2theta(eta[, 2], .lscale, earg = .escale )
- EulerM = -digamma(1)
+ locat = eta2theta(eta[, 1], .llocat, earg = .elocat)
+ sigma = eta2theta(eta[, 2], .lscale , earg = .escale )
+ EulerM <- -digamma(1)
Percentiles = extra$percentiles
mpv = extra$mpv
LP = length(Percentiles) # may be 0
- if (!LP) return(loc + sigma * EulerM)
+ if (!LP) return(locat + sigma * EulerM)
mu = matrix(as.numeric(NA), nrow(eta), LP + mpv)
Rvec = extra$R
if (1 <= LP)
for(ii in 1:LP) {
ci = if (is.Numeric(Rvec)) Rvec * (1 - Percentiles[ii] / 100) else
-log(Percentiles[ii] / 100)
- mu[,ii] = loc - sigma * log(ci)
+ mu[,ii] = locat - sigma * log(ci)
}
if (mpv)
- mu[, ncol(mu)] = loc - sigma * log(log(2))
+ mu[, ncol(mu)] = locat - sigma * log(log(2))
dmn2 = if (LP >= 1) paste(as.character(Percentiles), "%",
sep = "") else NULL
if (mpv)
dmn2 = c(dmn2, "MPV")
dimnames(mu) = list(dimnames(eta)[[1]], dmn2)
mu
- }, list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale = .lscale)
- misc$earg = list(location= .elocation, scale= .escale)
+ misc$link = c(location = .llocat, scale = .lscale)
+ misc$earg = list(location = .elocat, scale = .escale)
misc$true.mu = !length( .percentiles) # @fitted is not a true mu
misc$R = .R
misc$mpv = .mpv
misc$percentiles = .percentiles
- }), list( .llocation = llocation, .lscale = lscale, .mpv = mpv,
- .elocation = elocation, .escale = escale,
+ }), list( .llocat = llocat, .lscale = lscale, .mpv = mpv,
+ .elocat = elocat, .escale = escale,
.R = R, .percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sc = eta2theta(eta[, 2], .lscale, earg = .escale )
+ function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+ loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sca = eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgumbel(x=y, location = loc, scale=sc, log = TRUE))
+ sum(w * dgumbel(x=y, location = loc, scale = sca, log = TRUE))
}
- }, list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
vfamily = "egumbel",
deriv = eval(substitute(expression({
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation)
- sc = eta2theta(eta[, 2], .lscale, earg = .escale )
- zedd = (y-loc) / sc
+ loc = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ sca = eta2theta(eta[, 2], .lscale , earg = .escale )
+ zedd = (y-loc) / sca
temp2 = -expm1(-zedd)
- dl.dloc = temp2 / sc
- dl.dsc = -1/sc + temp2 * zedd / sc
- dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation)
- dsc.deta = dtheta.deta(sc, .lscale, earg = .escale )
+ dl.dloc = temp2 / sca
+ dl.dsca = -1/sca + temp2 * zedd / sca
+ dloc.deta = dtheta.deta(loc, .llocat , earg = .elocat)
+ dsca.deta = dtheta.deta(sca, .lscale , earg = .escale )
c(w) * cbind(dl.dloc * dloc.deta,
- dl.dsc * dsc.deta)
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
+ dl.dsca * dsca.deta)
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale ))),
weight=expression({
digamma1 = digamma(1)
- ed2l.dsc2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sc^2
- ed2l.dloc2 = 1 / sc^2
- ed2l.dscloc = -(1 + digamma1) / sc^2
+ ned2l.dsca2 = ((2+digamma1)*digamma1 + trigamma(1) + 1) / sca^2
+ ned2l.dloc2 = 1 / sca^2
+ ned2l.dscaloc = -(1 + digamma1) / sca^2
+
wz = matrix(as.numeric(NA), n, dimm(M = 2))
- wz[, iam(1, 1, M)] = ed2l.dloc2 * dloc.deta^2
- wz[, iam(2, 2, M)] = ed2l.dsc2 * dsc.deta^2
- wz[, iam(1, 2, M)] = ed2l.dscloc * dloc.deta * dsc.deta
+ wz[, iam(1, 1, M)] = ned2l.dloc2 * dloc.deta^2
+ wz[, iam(2, 2, M)] = ned2l.dsca2 * dsca.deta^2
+ wz[, iam(1, 2, M)] = ned2l.dscaloc * dloc.deta * dsca.deta
+
c(w) * wz
}))
}
@@ -1585,30 +1945,34 @@ setMethod("guplot", "vlm",
cgumbel <- function(llocation = "identity",
- lscale = "loge",
- elocation = list(),
- escale = list(), iscale = NULL,
- mean = TRUE, percentiles = NULL, zero = 2)
+ lscale = "loge",
+ iscale = NULL,
+ mean = TRUE, percentiles = NULL, zero = 2)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
if (!is.logical(mean) || length(mean) != 1)
stop("mean must be a single logical value")
if (!mean && (!is.Numeric(percentiles, positive = TRUE) ||
any(percentiles >= 100)))
- stop("valid percentiles values must be given when mean = FALSE")
+ stop("valid percentiles values must be given when mean = FALSE")
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
+
new("vglmff",
blurb = c("Censored Gumbel distribution\n\n",
"Links: ",
- namesof("location", llocation, earg = elocation, tag = TRUE),
+ namesof("location", llocat, earg = elocat, tag = TRUE),
", ",
namesof("scale", lscale, earg = escale, tag = TRUE),
"\n",
@@ -1624,6 +1988,10 @@ setMethod("guplot", "vlm",
if (any(y) <= 0)
stop("all response values must be positive")
+
+
+
+
if (!length(extra$leftcensored))
extra$leftcensored = rep(FALSE, length.out = n)
if (!length(extra$rightcensored))
@@ -1631,55 +1999,56 @@ setMethod("guplot", "vlm",
if (any(extra$rightcensored & extra$leftcensored))
stop("some observations are both right and left censored!")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale , tag = FALSE))
-
- if (!length(etastart)) {
- sc.init = if (is.Numeric( .iscale, positive = TRUE))
- .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
- sc.init = rep(sc.init, length.out = n)
- EulerM = -digamma(1)
- loc.init = (y - sc.init * EulerM)
- loc.init[loc.init <= 0] = min(y)
- etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation ),
- theta2eta(sc.init, .lscale, earg = .escale ))
- }
- }), list( .lscale = lscale, .iscale = iscale,
- .llocation = llocation,
- .elocation = elocation, .escale = escale ))),
- linkinv = eval(substitute( function(eta, extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation)
- sc = eta2theta(eta[, 2], .lscale)
- EulerM = -digamma(1)
- if (.mean) loc + sc * EulerM else {
- LP = length(.percentiles) # 0 if NULL
- mu = matrix(as.numeric(NA), nrow(eta), LP)
- for(ii in 1:LP) {
- ci = -log( .percentiles[ii] / 100)
- mu[, ii] = loc - sc * log(ci)
- }
- dmn2 = paste(as.character(.percentiles), "%", sep = "")
- dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
- mu
- }
- }, list( .lscale = lscale, .percentiles = percentiles,
- .llocation = llocation,
- .elocation = elocation, .escale = escale ,
- .mean=mean ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale = .lscale)
- misc$earg = list(location= .elocation, scale= .escale )
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ if (!length(etastart)) {
+ sc.init = if (is.Numeric( .iscale, positive = TRUE))
+ .iscale else 1.1 * sqrt(var(y) * 6 ) / pi
+ sc.init = rep(sc.init, length.out = n)
+ EulerM <- -digamma(1)
+ loc.init = (y - sc.init * EulerM)
+ loc.init[loc.init <= 0] = min(y)
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(sc.init, .lscale , earg = .escale ))
+ }
+ }), list( .lscale = lscale, .iscale = iscale,
+ .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
+ linkinv = eval(substitute( function(eta, extra = NULL) {
+ loc = eta2theta(eta[, 1], .llocat)
+ sc = eta2theta(eta[, 2], .lscale)
+ EulerM <- -digamma(1)
+ if (.mean) loc + sc * EulerM else {
+ LP = length(.percentiles) # 0 if NULL
+ mu = matrix(as.numeric(NA), nrow(eta), LP)
+ for(ii in 1:LP) {
+ ci = -log( .percentiles[ii] / 100)
+ mu[, ii] = loc - sc * log(ci)
+ }
+ dmn2 = paste(as.character(.percentiles), "%", sep = "")
+ dimnames(mu) <- list(dimnames(eta)[[1]], dmn2)
+ mu
+ }
+ }, list( .lscale = lscale, .percentiles = percentiles,
+ .llocat = llocat,
+ .elocat = elocat, .escale = escale ,
+ .mean=mean ))),
+ last = eval(substitute(expression({
+ misc$link = c(location= .llocat, scale = .lscale)
+ misc$earg = list(location= .elocat, scale= .escale )
misc$true.mu = .mean # if FALSE then @fitted is not a true mu
misc$percentiles = .percentiles
}), list( .lscale = lscale, .mean=mean,
- .llocation = llocation,
- .elocation = elocation, .escale = escale ,
+ .llocat = llocat,
+ .elocat = elocat, .escale = escale ,
.percentiles = percentiles ))),
loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta,extra = NULL) {
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation )
- sc = eta2theta(eta[, 2], .lscale, earg = .escale )
+ function(mu, y, w, residuals = FALSE,eta,extra = NULL) {
+ loc = eta2theta(eta[, 1], .llocat, earg = .elocat )
+ sc = eta2theta(eta[, 2], .lscale , earg = .escale )
zedd = (y-loc) / sc
cenL = extra$leftcensored
@@ -1693,22 +2062,22 @@ setMethod("guplot", "vlm",
"implemented yet") else
sum(w[cen0] * ell1) + sum(w[cenL] * ell2) + sum(w[cenU] * ell3)
}, list( .lscale = lscale,
- .llocation = llocation,
- .elocation = elocation, .escale = escale ))),
+ .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
vfamily = "cgumbel",
deriv = eval(substitute(expression({
cenL = extra$leftcensored
cenU = extra$rightcensored
cen0 = !cenL & !cenU # uncensored obsns
- loc = eta2theta(eta[, 1], .llocation, earg = .elocation )
- sc = eta2theta(eta[, 2], .lscale, earg = .escale )
+ loc = eta2theta(eta[, 1], .llocat, earg = .elocat )
+ sc = eta2theta(eta[, 2], .lscale , earg = .escale )
zedd = (y-loc) / sc
temp2 = -expm1(-zedd)
dl.dloc = temp2 / sc
dl.dsc = -1/sc + temp2 * zedd / sc
- dloc.deta = dtheta.deta(loc, .llocation, earg = .elocation )
- dsc.deta = dtheta.deta(sc, .lscale, earg = .escale )
+ dloc.deta = dtheta.deta(loc, .llocat, earg = .elocat )
+ dsc.deta = dtheta.deta(sc, .lscale , earg = .escale )
ezedd = exp(-zedd)
Fy = exp(-ezedd)
@@ -1725,8 +2094,8 @@ setMethod("guplot", "vlm",
c(w) * cbind(dl.dloc * dloc.deta,
dl.dsc * dsc.deta)
}), list( .lscale = lscale,
- .llocation = llocation,
- .elocation = elocation, .escale = escale ))),
+ .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
weight=expression({
A1 = ifelse(cenL, Fy, 0)
A3 = ifelse(cenU, 1-Fy, 0)
@@ -1763,7 +2132,7 @@ setMethod("guplot", "vlm",
dfrechet <- function(x, location = 0, scale = 1, shape, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1791,7 +2160,7 @@ pfrechet <- function(q, location = 0, scale = 1, shape) {
if (!is.Numeric(shape, positive = TRUE))
stop("shape must be positive")
rzedd = scale / (q - location)
- ans = exp(-(rzedd^shape))
+ ans <- exp(-(rzedd^shape))
ans[q <= location] = 0
ans
}
@@ -1833,9 +2202,7 @@ frechet2.control <- function(save.weight = TRUE, ...)
frechet2 <- function(location = 0,
lscale = "loge",
- lshape = "logoff",
- escale = list(),
- eshape = list(offset = -2),
+ lshape = logoff(offset = -2),
iscale = NULL, ishape = NULL,
nsimEIM = 250,
zero = NULL)
@@ -1844,13 +2211,15 @@ frechet2.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(location))
stop("bad input for argument 'location'")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape <- as.character(substitute(lshape))
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM == round(nsimEIM))
@@ -1864,15 +2233,28 @@ frechet2.control <- function(save.weight = TRUE, ...)
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE))
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale, short = TRUE),
+ namesof("shape", .lshape , earg = .eshape, short = TRUE))
+
extra$location = rep( .location, length.out = n) # stored here
+
if (!length(etastart)) {
locinit = extra$location
if (any(y <= locinit))
@@ -1894,47 +2276,48 @@ frechet2.control <- function(save.weight = TRUE, ...)
abs.arg = TRUE)
shape.init = if (length( .ishape ))
- rep( .ishape, length.out = n) else {
- rep(try.this, length.out = n) # variance exists if shape > 2
+ rep( .ishape , length.out = n) else {
+ rep(try.this , length.out = n) # variance exists if shape > 2
}
- myprobs = c(0.25, 0.5, 0.75)
- myobsns = quantile(y, probs = myprobs)
- myquant = (-log(myprobs))^(-1/shape.init[1])
- myfit = lsfit(x = myquant, y = myobsns)
+ myprobs = c(0.25, 0.5, 0.75)
+ myobsns = quantile(y, probs = myprobs)
+ myquant = (-log(myprobs))^(-1/shape.init[1])
+ myfit = lsfit(x = myquant, y = myobsns)
- Scale.init = if (length( .iscale))
- rep( .iscale, length.out = n) else {
- if (all(shape.init > 1)) {
- myfit$coef[2]
- } else {
- rep( 1.0, length.out = n)
- }
+ Scale.init = if (length( .iscale ))
+ rep( .iscale , length.out = n) else {
+ if (all(shape.init > 1)) {
+ myfit$coef[2]
+ } else {
+ rep( 1.0, length.out = n)
}
+ }
- etastart = cbind(theta2eta(Scale.init, .lscale, earg = .escale ),
- theta2eta(shape.init, .lshape, earg = .escale ))
- }
+ etastart <-
+ cbind(theta2eta(Scale.init, .lscale , earg = .escale ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
+ }
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.iscale = iscale, .ishape = ishape,
.location = location ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
loc = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale )
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
- ans = rep(as.numeric(NA), length.out = length(shape))
- ok = shape > 1
- ans[ok] = loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
+ ans <- rep(as.numeric(NA), length.out = length(shape))
+ ok <- shape > 1
+ ans[ok] <- loc[ok] + Scale[ok] * gamma(1 - 1/shape[ok])
ans
}, list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$links <- c("scale" = .lscale, "shape" = .lshape)
+ misc$links <- c("scale" = .lscale , "shape" = .lshape )
- misc$earg <- list("scale" = .escale, "shape" = .eshape)
+ misc$earg <- list("scale" = .escale , "shape" = .eshape )
misc$nsimEIM = .nsimEIM
}), list( .lscale = lscale, .lshape = lshape,
@@ -1943,8 +2326,8 @@ frechet2.control <- function(save.weight = TRUE, ...)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
loctn = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale )
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
sum(w * dfrechet(x = y, location = loctn, scale = Scale,
@@ -1954,18 +2337,18 @@ frechet2.control <- function(save.weight = TRUE, ...)
vfamily = c("frechet2", "vextremes"),
deriv = eval(substitute(expression({
loctn = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale )
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape )
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
- rzedd = Scale / (y - loctn) # reciprocial of zedd
+ rzedd = Scale / (y - loctn) # reciprocal of zedd
dl.dloctn = (shape + 1) / (y - loctn) -
(shape / (y - loctn)) * (rzedd)^shape
dl.dScale = shape * (1 - rzedd^shape) / Scale
dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
dthetas.detas <- cbind(
- dScale.deta <- dtheta.deta(Scale, .lscale, earg = .escale ),
- dShape.deta <- dtheta.deta(shape, .lshape, earg = .eshape ))
+ dScale.deta <- dtheta.deta(Scale, .lscale , earg = .escale ),
+ dShape.deta <- dtheta.deta(shape, .lshape , earg = .eshape ))
c(w) * cbind(dl.dScale,
dl.dshape) * dthetas.detas
@@ -1973,33 +2356,33 @@ frechet2.control <- function(save.weight = TRUE, ...)
.escale = escale, .eshape = eshape ))),
weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ run.varcov <- 0
+ ind1 <- iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
for(ii in 1:( .nsimEIM )) {
- ysim = rfrechet(n, loc = loctn, scale = Scale, shape = shape)
+ ysim <- rfrechet(n, loc = loctn, scale = Scale, shape = shape)
- rzedd = Scale / (ysim - loctn) # reciprocial of zedd
- dl.dloctn = (shape + 1) / (ysim - loctn) -
+ rzedd <- Scale / (ysim - loctn) # reciprocial of zedd
+ dl.dloctn <- (shape + 1) / (ysim - loctn) -
(shape / (ysim - loctn)) * (rzedd)^shape
- dl.dScale = shape * (1 - rzedd^shape) / Scale
- dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
+ dl.dScale <- shape * (1 - rzedd^shape) / Scale
+ dl.dshape <- 1 / shape + log(rzedd) * (1 - rzedd^shape)
rm(ysim)
- temp3 = cbind(dl.dScale, dl.dshape)
- run.varcov = run.varcov +
+ temp3 <- cbind(dl.dScale, dl.dshape)
+ run.varcov <- run.varcov +
temp3[, ind1$row.index] *
temp3[, ind1$col.index]
}
- run.varcov = run.varcov / .nsimEIM
+ run.varcov <- run.varcov / .nsimEIM
wz = if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = c(w) * wz * dthetas.detas[, ind1$row] *
- dthetas.detas[, ind1$col]
+ wz = c(w) * wz * dthetas.detas[, ind1$row.index] *
+ dthetas.detas[, ind1$col.index]
} else {
stop("argument 'nsimEIM' must be numeric")
}
@@ -2024,28 +2407,25 @@ if (FALSE)
frechet3 <- function(anchor = NULL,
ldifference = "loge",
lscale = "loge",
- lshape = "logoff",
- edifference = list(),
- escale = list(),
- eshape = list(offset = -2),
+ lshape = logoff(offset = -2),
ilocation = NULL, iscale = NULL, ishape = NULL,
nsimEIM = 250,
zero = 1)
{
- ediffr = edifference
- ldiffr = ldifference
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ ldiffr <- as.list(substitute(ldifference))
+ ediffr <- link2list(ldiffr)
+ ldiffr <- attr(escale, "function.name")
- if (mode(ldiffr) != "character" && mode(ldiffr) != "name")
- ldiffr <- as.character(substitute(ldiffr))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape <- as.character(substitute(lshape))
- if (!is.list(ediffr)) ediffr = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM == round(nsimEIM))
@@ -2063,10 +2443,10 @@ if (FALSE)
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("difference", .ldiffr, earg = .ediffr, short = TRUE),
- namesof("scale", .lscale, earg = .escale, short = TRUE),
- namesof("shape", .lshape, earg = .eshape, short = TRUE))
+ predictors.names <-
+ c(namesof("difference", .ldiffr , earg = .ediffr, short = TRUE),
+ namesof("scale", .lscale , earg = .escale, short = TRUE),
+ namesof("shape", .lshape , earg = .eshape, short = TRUE))
anchorpt = if (is.Numeric( .anchor, allowable.length = 1))
.anchor else min(y)
@@ -2090,8 +2470,6 @@ if (FALSE)
try.this = getMaxMin(shape.grid, objfun = frech.aux,
y = y, x = x, w = w, maximize = FALSE,
abs.arg = TRUE)
- print("try.this")
- print( try.this )
shape.init =
if (length( .ishape ))
@@ -2106,8 +2484,6 @@ if (FALSE)
myobsns = quantile(y, probs = myprobs)
myquant = (-log(myprobs))^(-1/shape.init[1])
myfit = lsfit(x = myquant, y = myobsns)
- print("myfit$coef")
- print( myfit$coef )
plot(myobsns ~ myquant)
@@ -2125,13 +2501,8 @@ if (FALSE)
locinit = if (length( .ilocation))
rep( .ilocation, length.out = n) else {
if (myfit$coef[1] < min(y)) {
- print("using myfit$coef[1] for initial location")
- print( myfit$coef[1] )
- print( min(y) )
- print( anchorpt )
rep(myfit$coef[1], length.out = n)
} else {
- print("using heuristic initial location")
rep(anchorpt - 0.01 * diff(range(y)), length.out = n)
}
}
@@ -2143,13 +2514,10 @@ if (FALSE)
- etastart = cbind(theta2eta(anchorpt - locinit, .ldiffr),
- theta2eta(Scale.init, .lscale),
- theta2eta(shape.init, .lshape))
- print("head(etastart)")
- print( head(etastart) )
- print("summary(etastart)")
- print( summary(etastart) )
+ etastart <-
+ cbind(theta2eta(anchorpt - locinit, .ldiffr),
+ theta2eta(Scale.init, .lscale),
+ theta2eta(shape.init, .lshape))
}
}), list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape,
@@ -2157,18 +2525,18 @@ if (FALSE)
.ilocation = ilocation, .anchor = anchor ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
loctn = extra$LHSanchor -
- eta2theta(eta[, 1], .ldiffr, earg = .ediffr)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
- ans = rep(as.numeric(NA), length.out = length(shape))
+ eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ ans <- rep(as.numeric(NA), length.out = length(shape))
okay = shape > 1
ans[okay] = loctn[okay] + Scale[okay] * gamma(1 - 1/shape[okay])
ans
}, list( .ldiffr = ldiffr, .lscale = lscale, .lshape = lshape,
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
last = eval(substitute(expression({
- misc$links <- c("difference" = .ldiffr,
- "scale" = .lscale,
+ misc$links <- c("difference" = .ldiffr ,
+ "scale" = .lscale ,
"shape" = .lshape)
misc$earg <- list("difference" = .ediffr,
@@ -2185,9 +2553,9 @@ if (FALSE)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
loctn = extra$LHSanchor -
- eta2theta(eta[, 1], .ldiffr, earg = .ediffr)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
+ eta2theta(eta[, 1], .ldiffr , earg = .ediffr)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(w * dfrechet(x = y, location = loctn, scale = Scale,
@@ -2197,11 +2565,9 @@ if (FALSE)
.ediffr = ediffr, .escale = escale, .eshape = eshape ))),
vfamily = c("frechet3", "vextremes"),
deriv = eval(substitute(expression({
- print("summary(eta) in @deriv ,,,,,,,,,,,,,,")
- print( summary(eta) )
- Difrc = eta2theta(eta[, 1], .ldiffr, earg = .ediffr )
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
+ Difrc = eta2theta(eta[, 1], .ldiffr , earg = .ediffr )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
loctn = extra$LHSanchor - Difrc
rzedd = Scale / (y - loctn) # reciprocial of zedd
@@ -2213,11 +2579,11 @@ if (FALSE)
dl.dshape = 1 / shape + log(rzedd) * (1 - rzedd^shape)
dthetas.detas <- cbind(
- ddifff.deta <- dtheta.deta(Difrc, .ldiffr, earg = .ediffr ),
- dScale.deta <- dtheta.deta(Scale, .lscale, earg = .escale ),
- dShape.deta <- dtheta.deta(shape, .lshape, earg = .eshape ))
+ ddifff.deta <- dtheta.deta(Difrc, .ldiffr , earg = .ediffr ),
+ dScale.deta <- dtheta.deta(Scale, .lscale , earg = .escale ),
+ dShape.deta <- dtheta.deta(shape, .lshape , earg = .eshape ))
- ans =
+ ans <-
c(w) * cbind(dl.ddifff,
dl.dScale,
dl.dshape) * dthetas.detas
@@ -2281,31 +2647,46 @@ recnormal1.control <- function(save.weight = TRUE, ...)
list(save.weight = save.weight)
}
+
recnormal1 <- function(lmean = "identity", lsd = "loge",
- imean = NULL, isd = NULL, imethod = 1, zero = NULL)
+ imean = NULL, isd = NULL, imethod = 1,
+ zero = NULL)
{
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ lsdev <- as.list(substitute(lsd))
+ esdev <- link2list(lsdev)
+ lsdev <- attr(esdev, "function.name")
+
+ isdev <- isd
+
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
imethod > 3.5)
stop("argument 'imethod' must be 1 or 2 or 3")
+
+
new("vglmff",
blurb = c("Upper record values from a univariate normal distribution\n\n",
"Links: ",
- namesof("mean", lmean, tag = TRUE), "; ",
- namesof("sd", lsd, tag = TRUE),
+ namesof("mean", lmean, emean, tag = TRUE), "; ",
+ namesof("sd", lsdev, esdev, tag = TRUE),
"\n",
"Variance: sd^2"),
constraints=eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names = c(namesof("mean", .lmean, tag = FALSE),
- namesof("sd", .lsd, tag = FALSE))
+
+
+
+ predictors.names <-
+ c(namesof("mean", .lmean, .emean, tag = FALSE),
+ namesof("sd", .lsdev, .esdev, tag = FALSE))
if (ncol(y <- cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
@@ -2314,51 +2695,62 @@ recnormal1.control <- function(save.weight = TRUE, ...)
stop("response must have increasingly larger and larger values")
if (any(w != 1))
warning("weights should have unit values only")
+
+
if (!length(etastart)) {
mean.init = if (length( .imean)) rep( .imean ,
length.out = n) else {
if (.lmean == "loge") pmax(1/1024, min(y)) else min(y)}
- sd.init = if (length( .isd)) rep( .isd, length.out = n) else {
+ sd.init = if (length( .isdev)) rep( .isdev, length.out = n) else {
if (.imethod == 1) 1*(sd(c(y))) else
if (.imethod == 2) 5*(sd(c(y))) else
.5*(sd(c(y)))
}
- etastart = cbind(theta2eta(rep(mean.init, len = n), .lmean),
- theta2eta(rep(sd.init, len = n), .lsd))
+ etastart <-
+ cbind(theta2eta(rep(mean.init, len = n), .lmean, .emean ),
+ theta2eta(rep(sd.init, len = n), .lsdev, .esdev ))
}
- }), list( .lmean = lmean, .lsd = lsd, .imean = imean, .isd = isd,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmean)
- }, list( .lmean = lmean ))),
- last = eval(substitute(expression({
- misc$link = c("mu" = .lmean, "sd" = .lsd)
- misc$expected = FALSE
- }), list( .lmean = lmean, .lsd = lsd ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- sd = eta2theta(eta[, 2], .lsd)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- zedd = (y - mu) / sd
- NN = nrow(eta)
- sum(w * (-log(sd) - 0.5 * zedd^2)) -
- sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
+ }), list( .lmean = lmean, .lsdev = lsdev,
+ .emean = emean, .esdev = esdev,
+ .imean = imean, .isdev = isdev,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .lmean, .emean )
+ }, list( .lmean = lmean, .emean = emean ))),
+ last = eval(substitute(expression({
+ misc$link <- c("mu" = .lmean , "sd" = .lsdev )
+ misc$earg <- list("mu" = .emean , "sd" = .esdev )
+
+
+ misc$expected = FALSE
+ }), list( .lmean = lmean, .lsdev = lsdev,
+ .emean = emean, .esdev = esdev ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ sdev = eta2theta(eta[, 2], .lsdev)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ zedd = (y - mu) / sdev
+ NN = nrow(eta)
+ sum(w * (-log(sdev) - 0.5 * zedd^2)) -
+ sum(w[-NN] * pnorm(zedd[-NN], lower.tail = FALSE, log.p = TRUE))
}
- }, list( .lsd = lsd ))),
+ }, list( .lsdev = lsdev, .esdev = esdev ))),
vfamily = c("recnormal1"),
deriv = eval(substitute(expression({
NN = nrow(eta)
mymu = eta2theta(eta[, 1], .lmean)
- sd = eta2theta(eta[, 2], .lsd)
- zedd = (y - mymu) / sd
+ sdev = eta2theta(eta[, 2], .lsdev)
+ zedd = (y - mymu) / sdev
temp200 = dnorm(zedd) / (1-pnorm(zedd))
- dl.dmu = (zedd - temp200) / sd
- dl.dmu[NN] = zedd[NN] / sd[NN]
- dl.dsd = (-1 + zedd^2 - zedd * temp200) / sd
- dl.dsd[NN] = (-1 + zedd[NN]^2) / sd[NN]
- dmu.deta = dtheta.deta(mymu, .lmean)
- dsd.deta = dtheta.deta(sd, .lsd)
+ dl.dmu = (zedd - temp200) / sdev
+ dl.dmu[NN] = zedd[NN] / sdev[NN]
+ dl.dsd = (-1 + zedd^2 - zedd * temp200) / sdev
+ dl.dsd[NN] = (-1 + zedd[NN]^2) / sdev[NN]
+
+ dmu.deta = dtheta.deta(mymu, .lmean, .emean )
+ dsd.deta = dtheta.deta(sdev, .lsdev, .esdev )
+
if (iter == 1) {
etanew = eta
} else {
@@ -2369,7 +2761,8 @@ recnormal1.control <- function(save.weight = TRUE, ...)
derivnew = c(w) * cbind(dl.dmu * dmu.deta,
dl.dsd * dsd.deta)
derivnew
- }), list( .lmean = lmean, .lsd = lsd ))),
+ }), list( .lmean = lmean, .lsdev = lsdev,
+ .emean = emean, .esdev = esdev ))),
weight = expression({
if (iter == 1) {
wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
@@ -2387,77 +2780,95 @@ recnormal1.control <- function(save.weight = TRUE, ...)
recexp1.control <- function(save.weight = TRUE, ...)
{
- list(save.weight = save.weight)
+ list(save.weight = save.weight)
}
+
recexp1 <- function(lrate = "loge", irate = NULL, imethod = 1)
{
+ lrate <- as.list(substitute(lrate))
+ erate <- link2list(lrate)
+ lrate <- attr(erate, "function.name")
- if (mode(lrate) != "character" && mode(lrate) != "name")
- lrate = as.character(substitute(lrate))
- if (!is.Numeric(imethod, allowable.length = 1,
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 3.5)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ imethod > 3.5)
+ stop("argument 'imethod' must be 1 or 2 or 3")
- new("vglmff",
- blurb = c("Upper record values from a ",
- "1-parameter exponential distribution\n\n",
- "Links: ",
- namesof("rate", lrate, tag = TRUE),
- "\n",
- "Variance: 1/rate^2"),
- initialize = eval(substitute(expression({
- predictors.names = c(namesof("rate", .lrate, tag = FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(diff(y) <= 0))
- stop("response must have increasingly larger and larger values")
- if (any(w != 1))
- warning("weights should have unit values only")
- if (!length(etastart)) {
- rate.init = if (length( .irate))
- rep( .irate, len = n) else {
- init.rate =
- if (.imethod == 1) length(y) / y[length(y), 1] else
- if (.imethod == 2) 1/mean(y) else 1/median(y)
- if (.lrate == "loge") pmax(1/1024, init.rate) else
- init.rate}
- etastart =
- cbind(theta2eta(rep(rate.init, len = n), .lrate))
- }
- }), list( .lrate = lrate, .irate = irate, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .lrate)
- }, list( .lrate = lrate ))),
- last = eval(substitute(expression({
- misc$link = c("rate" = .lrate)
- misc$expected = TRUE
- }), list( .lrate = lrate ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- rate = eta2theta(eta, .lrate)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- NN = length(eta)
- y = cbind(y)
- sum(w * log(rate)) - w[NN] * rate[NN] * y[NN, 1]
- }
- }, list( .lrate = lrate ))),
- vfamily = c("recexp1"),
- deriv = eval(substitute(expression({
+
+
+ new("vglmff",
+ blurb = c("Upper record values from a ",
+ "1-parameter exponential distribution\n\n",
+ "Links: ",
+ namesof("rate", lrate, erate, tag = TRUE),
+ "\n",
+ "Variance: 1/rate^2"),
+ initialize = eval(substitute(expression({
+ predictors.names <-
+ c(namesof("rate", .lrate , .erate , tag = FALSE))
+
+ if (ncol(y <- cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if (any(diff(y) <= 0))
+ stop("response must have increasingly larger and larger values")
+ if (any(w != 1))
+ warning("weights should have unit values only")
+
+
+ if (!length(etastart)) {
+ rate.init = if (length( .irate))
+ rep( .irate, len = n) else {
+ init.rate =
+ if (.imethod == 1) length(y) / y[length(y), 1] else
+ if (.imethod == 2) 1/mean(y) else 1/median(y)
+ if (.lrate == "loge") pmax(1/1024, init.rate) else
+ init.rate}
+
+ etastart =
+ cbind(theta2eta(rep(rate.init, len = n), .lrate , .erate ))
+ }
+ }), list( .lrate = lrate,
+ .erate = erate,
+ .irate = irate, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .lrate , .erate )
+ }, list( .lrate = lrate, .erate = erate ))),
+ last = eval(substitute(expression({
+ misc$link <- c("rate" = .lrate)
+ misc$earg <- list("rate" = .erate)
+
+ misc$expected = TRUE
+ }), list( .lrate = lrate, .erate = erate ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ rate = eta2theta(eta, .lrate , .erate )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
NN = length(eta)
- rate = c(eta2theta(eta, .lrate))
- dl.drate = 1 / rate
- dl.drate[NN] = 1/ rate[NN] - y[NN, 1]
- drate.deta = dtheta.deta(rate, .lrate)
- c(w) * cbind(dl.drate * drate.deta)
- }), list( .lrate = lrate ))),
- weight=expression({
- ed2l.drate2 = 1 / rate^2
- wz = drate.deta^2 * ed2l.drate2
- c(w) * wz
- }))
+ y = cbind(y)
+ sum(w * log(rate)) - w[NN] * rate[NN] * y[NN, 1]
+ }
+ }, list( .lrate = lrate, .erate = erate ))),
+ vfamily = c("recexp1"),
+ deriv = eval(substitute(expression({
+ NN = length(eta)
+ rate = c(eta2theta(eta, .lrate , .erate ))
+
+ dl.drate = 1 / rate
+ dl.drate[NN] = 1/ rate[NN] - y[NN, 1]
+
+ drate.deta = dtheta.deta(rate, .lrate , .erate )
+
+ c(w) * cbind(dl.drate * drate.deta)
+ }), list( .lrate = lrate, .erate = erate ))),
+ weight = expression({
+ ed2l.drate2 = 1 / rate^2
+ wz = drate.deta^2 * ed2l.drate2
+ c(w) * wz
+ }))
}
@@ -2469,116 +2880,128 @@ recexp1.control <- function(save.weight = TRUE, ...)
poissonp <- function(ostatistic, dimension = 2,
- link = "loge", earg = list(),
+ link = "loge",
idensity = NULL, imethod = 1) {
- if (!is.Numeric(ostatistic, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE))
- stop("argument 'ostatistic' must be a single positive integer")
- if (!is.Numeric(dimension, positive = TRUE,
- allowable.length = 1, integer.valued = TRUE) ||
- dimension > 3)
- stop("argument 'dimension' must be 2 or 3")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
-
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- positive = TRUE, integer.valued = TRUE) ||
- imethod > 2.5)
- stop("argument 'imethod' must be 1 or 2")
- if (length(idensity) &&
- !is.Numeric(idensity, positive = TRUE))
- stop("bad input for argument 'idensity'")
-
- new("vglmff",
- blurb = c(if (dimension == 2)
- "Poisson-points-on-a-plane distances distribution\n" else
- "Poisson-points-on-a-volume distances distribution\n",
- "Link: ",
- namesof("density", link, earg = earg), "\n\n",
- if (dimension == 2)
+ if (!is.Numeric(ostatistic, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE))
+ stop("argument 'ostatistic' must be a single positive integer")
+ if (!is.Numeric(dimension, positive = TRUE,
+ allowable.length = 1, integer.valued = TRUE) ||
+ dimension > 3)
+ stop("argument 'dimension' must be 2 or 3")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE) ||
+ imethod > 2.5)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(idensity) &&
+ !is.Numeric(idensity, positive = TRUE))
+ stop("bad input for argument 'idensity'")
+
+ new("vglmff",
+ blurb = c(if (dimension == 2)
+ "Poisson-points-on-a-plane distances distribution\n" else
+ "Poisson-points-on-a-volume distances distribution\n",
+ "Link: ",
+ namesof("density", link, earg = earg), "\n\n",
+ if (dimension == 2)
"Mean: gamma(s+0.5) / (gamma(s) * sqrt(density * pi))" else
"Mean: gamma(s+1/3) / (gamma(s) * (4*density*pi/3)^(1/3))"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0))
- stop("response must contain positive values only")
- predictors.names =
- namesof("density", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- use.this = if ( .imethod == 1) median(y) + 1/8 else
- weighted.mean(y,w)
- if ( .dimension == 2) {
- myratio = exp(lgamma( .ostatistic + 0.5) -
- lgamma( .ostatistic ))
- density.init = if (is.Numeric( .idensity ))
- rep( .idensity, len = n) else
- rep(myratio^2 / (pi * use.this^2), len = n)
- etastart = theta2eta(density.init, .link, earg = .earg)
- } else {
- myratio = exp(lgamma( .ostatistic +1/3) -
- lgamma( .ostatistic ))
- density.init = if (is.Numeric( .idensity ))
- rep( .idensity, len = n) else
- rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
- etastart = theta2eta(density.init, .link, earg = .earg)
- }
- }
- }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension, .imethod = imethod,
- .idensity = idensity ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- density = eta2theta(eta, .link, earg = .earg)
- if ( .dimension == 2) {
- myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
- myratio / sqrt(density * pi)
- } else {
- myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
- myratio / (4 * density * pi/3)^(1/3)
- }
- }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension ))),
- last = eval(substitute(expression({
- misc$link = c("density" = .link)
- misc$earg = list("density" = .earg)
- misc$expected = TRUE
- misc$ostatistic = .ostatistic
- misc$dimension = .dimension
- }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals= FALSE,eta, extra = NULL) {
- density = eta2theta(eta, .link, earg = .earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- if ( .dimension == 2)
- sum(w * (log(2) + .ostatistic * log(pi * density) -
- lgamma( .ostatistic) + (2* .ostatistic-1) * log(y) -
- density * pi * y^2)) else
- sum(w * (log(3) + .ostatistic * log(4*pi * density/3) -
- lgamma( .ostatistic) + (3* .ostatistic-1) * log(y) -
- (4/3) * density * pi * y^3))
- }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension ))),
- vfamily = c("poissonp"),
- deriv = eval(substitute(expression({
- density = eta2theta(eta, .link, earg = .earg)
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+ if (any(y <= 0))
+ stop("response must contain positive values only")
+
+
+
+ predictors.names <-
+ namesof("density", .link, earg = .earg, tag = FALSE)
+
+
+
+ if (!length(etastart)) {
+ use.this = if ( .imethod == 1) median(y) + 1/8 else
+ weighted.mean(y,w)
if ( .dimension == 2) {
- dl.ddensity = .ostatistic / density - pi * y^2
+ myratio = exp(lgamma( .ostatistic + 0.5) -
+ lgamma( .ostatistic ))
+ density.init = if (is.Numeric( .idensity ))
+ rep( .idensity, len = n) else
+ rep(myratio^2 / (pi * use.this^2), len = n)
+ etastart = theta2eta(density.init, .link, earg = .earg)
} else {
- dl.ddensity = .ostatistic / density - (4/3) * pi * y^3
+ myratio = exp(lgamma( .ostatistic +1/3) -
+ lgamma( .ostatistic ))
+ density.init = if (is.Numeric( .idensity ))
+ rep( .idensity, len = n) else
+ rep(3 * myratio^3 / (4 * pi * use.this^3), len = n)
+ etastart = theta2eta(density.init, .link, earg = .earg)
}
- ddensity.deta = dtheta.deta(density, .link, earg = .earg)
- w * dl.ddensity * ddensity.deta
- }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension ))),
- weight = eval(substitute(expression({
- ed2l.ddensity2 = .ostatistic / density^2
- wz = ddensity.deta^2 * ed2l.ddensity2
- c(w) * wz
- }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
- .dimension = dimension ))))
+ }
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension, .imethod = imethod,
+ .idensity = idensity ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ density = eta2theta(eta, .link, earg = .earg)
+ if ( .dimension == 2) {
+ myratio = exp(lgamma( .ostatistic +0.5) - lgamma( .ostatistic ))
+ myratio / sqrt(density * pi)
+ } else {
+ myratio = exp(lgamma( .ostatistic +1/3) - lgamma( .ostatistic))
+ myratio / (4 * density * pi/3)^(1/3)
+ }
+ }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ last = eval(substitute(expression({
+ misc$link = c("density" = .link)
+ misc$earg = list("density" = .earg)
+ misc$expected = TRUE
+ misc$ostatistic = .ostatistic
+ misc$dimension = .dimension
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
+ density = eta2theta(eta, .link, earg = .earg)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ if ( .dimension == 2)
+ sum(w * (log(2) + .ostatistic * log(pi * density) -
+ lgamma( .ostatistic) + (2* .ostatistic-1) * log(y) -
+ density * pi * y^2)) else
+ sum(w * (log(3) + .ostatistic * log(4*pi * density/3) -
+ lgamma( .ostatistic) + (3* .ostatistic-1) * log(y) -
+ (4/3) * density * pi * y^3))
+ }, list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ vfamily = c("poissonp"),
+ deriv = eval(substitute(expression({
+ density = eta2theta(eta, .link, earg = .earg)
+
+ if ( .dimension == 2) {
+ dl.ddensity = .ostatistic / density - pi * y^2
+ } else {
+ dl.ddensity = .ostatistic / density - (4/3) * pi * y^3
+ }
+
+ ddensity.deta = dtheta.deta(density, .link, earg = .earg)
+
+ c(w) * dl.ddensity * ddensity.deta
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))),
+ weight = eval(substitute(expression({
+ ned2l.ddensity2 = .ostatistic / density^2
+ wz = ddensity.deta^2 * ned2l.ddensity2
+ c(w) * wz
+ }), list( .link = link, .earg = earg, .ostatistic = ostatistic,
+ .dimension = dimension ))))
}
diff --git a/R/family.genetic.R b/R/family.genetic.R
index 72fa69d..c08c125 100644
--- a/R/family.genetic.R
+++ b/R/family.genetic.R
@@ -13,44 +13,47 @@
- G1G2G3 = function(link = "logit", earg = list(),
- ip1 = NULL, ip2 = NULL, iF = NULL)
+ G1G2G3 <- function(link = "logit",
+ ip1 = NULL, ip2 = NULL, iF = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("G1-G2-G3 phenotype\n\n",
- "Links: ",
- namesof("p1", link, earg = earg), ", ",
- namesof("p2", link, earg = earg), ", ",
- namesof("f", link, earg = earg, tag = FALSE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("G1-G2-G3 phenotype\n\n",
+ "Links: ",
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg), ", ",
+ namesof("f", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
if (length(mustart.orig))
mustart = mustart.orig
- ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have ",
- "names (output of colnames()) ordered as ",
- "c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
- }
+ ok.col.ny = c("G1G1","G1G2","G1G3","G2G2","G2G3","G3G3")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
+ "c('G1G1','G1G2','G1G3','G2G2','G2G3','G3G3')")
+ }
- predictors.names =
- c(namesof("p1", .link , earg = .earg , tag = FALSE),
- namesof("p2", .link , earg = .earg , tag = FALSE),
- namesof("f", .link , earg = .earg , tag = FALSE))
+ predictors.names <-
+ c(namesof("p1", .link , earg = .earg , tag = FALSE),
+ namesof("p2", .link , earg = .earg , tag = FALSE),
+ namesof("f", .link , earg = .earg , tag = FALSE))
- if (is.null(etastart)) {
+ if (is.null(etastart)) {
@@ -72,23 +75,23 @@
etastart = cbind(theta2eta(p1, .link , earg = .earg ),
theta2eta(p2, .link , earg = .earg ),
theta2eta(ff, .link , earg = .earg ))
- mustart <- NULL # Since etastart has been computed.
+ mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
- .earg = earg))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- f = eta2theta(eta[, 3], link = .link , earg = .earg )
- p3 = abs(1 - p1 - p2)
- cbind("G1G1" = f*p1+(1-f)*p1^2,
- "G1G2" = 2*p1*p2*(1-f),
- "G1G3" = 2*p1*p3*(1-f),
- "G2G2" = f*p2+(1-f)*p2^2,
- "G2G3" = 2*p2*p3*(1-f),
- "G3G3" = f*p3+(1-f)*p3^2)
- }, list( .link = link, .earg = earg))),
+ }
+ }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .iF = iF,
+ .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ f = eta2theta(eta[, 3], link = .link , earg = .earg )
+ p3 = abs(1 - p1 - p2)
+ cbind("G1G1" = f*p1+(1-f)*p1^2,
+ "G1G2" = 2*p1*p2*(1-f),
+ "G1G3" = 2*p1*p3*(1-f),
+ "G2G2" = f*p2+(1-f)*p2^2,
+ "G2G3" = 2*p2*p3*(1-f),
+ "G3G3" = f*p3+(1-f)*p3^2)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c(p1 = .link , p2 = .link , f = .link )
@@ -96,103 +99,104 @@
misc$expected = TRUE
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("G1G2G3", "vgenetic"),
- deriv = eval(substitute(expression({
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- p3 = 1-p1-p2
- f = eta2theta(eta[, 3], link = .link , earg = .earg )
- dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
- 0, -2*(1-f)*p2, -f - 2*p3*(1-f))
- dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
- 2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
- dP3 = cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
- p3*(1-p3))
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
- dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link , earg = .earg )
- c(w) * cbind(dPP.deta[, 1] * dl1,
- dPP.deta[, 2] * dl2,
- dPP.deta[, 3] * dl3)
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
-
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
- for(i1 in 1:M)
- for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[, , i1, drop = TRUE] *
- dPP[, , i2, drop = TRUE] / mu) *
- dPP.deta[, i1] * dPP.deta[, i2]
- }
- c(w) * wz
- }), list( .link = link, .earg = earg))))
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("G1G2G3", "vgenetic"),
+ deriv = eval(substitute(expression({
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ p3 = 1-p1-p2
+ f = eta2theta(eta[, 3], link = .link , earg = .earg )
+ dP1 = cbind(f + 2*p1*(1-f), 2*(1-f)*p2, 2*(1-f)*(1-p2-2*p1),
+ 0, -2*(1-f)*p2, -f - 2*p3*(1-f))
+ dP2 = cbind(0, 2*p1*(1-f), -2*(1-f)*p1, f+2*p2*(1-f),
+ 2*(1-f)*(1-p1-2*p2), -f - 2*p3*(1-f))
+ dP3 = cbind(p1*(1-p1), -2*p1*p2, -2*p1*p3, p2*(1-p2), -2*p2*p3,
+ p3*(1-p3))
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+ dl3 = rowSums(y * dP3 / mu)
+ dPP.deta = dtheta.deta(cbind(p1, p2, f), link = .link , earg = .earg )
+ c(w) * cbind(dPP.deta[, 1] * dl1,
+ dPP.deta[, 2] * dl2,
+ dPP.deta[, 3] * dl3)
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index = iam(i1,i2, M)
+ wz[,index] = rowSums(dPP[, , i1, drop = TRUE] *
+ dPP[, , i2, drop = TRUE] / mu) *
+ dPP.deta[, i1] * dPP.deta[, i2]
+ }
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
- AAaa.nohw = function(link = "logit", earg = list(), ipA = NULL, iF = NULL)
+ AAaa.nohw <- function(link = "logit", ipA = NULL, iF = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
- "Links: ",
- namesof("pA", link, earg = earg), ", ",
- namesof("f", "identity", tag = FALSE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ new("vglmff",
+ blurb = c("AA-Aa-aa phenotype (without Hardy-Weinberg assumption)\n\n",
+ "Links: ",
+ namesof("pA", link, earg = earg), ", ",
+ namesof("f", "identity", tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
- if (length(mustart.orig))
- mustart = mustart.orig
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
- ok.col.ny = c("AA","Aa","aa")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have names ",
- "(output of colnames()) ordered as c('AA','Aa','aa')")
- }
+ if (length(mustart.orig))
+ mustart = mustart.orig
- predictors.names =
- c(namesof("pA", .link , earg = .earg , tag = FALSE),
- namesof("f", "identity", earg = list(), tag = FALSE))
+ ok.col.ny = c("AA","Aa","aa")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
+ }
- if (is.null(etastart)) {
- pA = if (is.numeric( .ipA )) rep( .ipA , len = n) else
- c(sqrt(mustart[, 1] - mustart[, 2] / 2))
- f = if (is.numeric( .iF )) rep( .iF , len = n) else
- rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
- if (any(pA <= 0) || any(pA >= 1))
- stop("bad initial value for 'pA'")
- etastart = cbind(theta2eta(pA, .link , earg = .earg ),
- theta2eta(f, "identity"))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .ipA = ipA, .iF = iF, .earg = earg))),
+ predictors.names <-
+ c(namesof("pA", .link , earg = .earg , tag = FALSE),
+ namesof("f", "identity", earg = list(), tag = FALSE))
+
+ if (is.null(etastart)) {
+ pA = if (is.numeric( .ipA )) rep( .ipA , len = n) else
+ c(sqrt(mustart[, 1] - mustart[, 2] / 2))
+ f = if (is.numeric( .iF )) rep( .iF , len = n) else
+ rep(0.01, len = n) # 1- mustart[, 2]/(2*pA*(1-pA))
+ if (any(pA <= 0) || any(pA >= 1))
+ stop("bad initial value for 'pA'")
+ etastart = cbind(theta2eta(pA, .link , earg = .earg ),
+ theta2eta(f, "identity"))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .link = link, .ipA = ipA, .iF = iF, .earg = earg))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- pA = eta2theta(eta[, 1], link = .link , earg = .earg )
- f = eta2theta(eta[, 2], link = "identity", earg = list())
- cbind(AA = pA^2+pA*(1-pA)*f,
- Aa = 2*pA*(1-pA)*(1-f),
- aa = (1-pA)^2 + pA*(1-pA)*f)
- }, list( .link = link, .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ pA = eta2theta(eta[, 1], link = .link , earg = .earg )
+ f = eta2theta(eta[, 2], link = "identity", earg = list())
+ cbind(AA = pA^2+pA*(1-pA)*f,
+ Aa = 2*pA*(1-pA)*(1-f),
+ aa = (1-pA)^2 + pA*(1-pA)*f)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c(pA = .link , f = "identity")
@@ -201,64 +205,68 @@
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("AAaa.nohw", "vgenetic"),
- deriv = eval(substitute(expression({
- pA = eta2theta(eta[, 1], link = .link , earg = .earg )
- f = eta2theta(eta[, 2], link = "identity")
- dP1 = cbind(f + 2*pA*(1-f),
- 2*(1-f)*(1-2*pA),
- -2*(1-pA) +f*(1-2*pA))
- dP2 = cbind(pA*(1-pA),
- -2*pA*(1-pA),
- pA*(1-pA))
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
- dPP.deta = dtheta.deta(pA, link = .link , earg = .earg )
- c(w) * cbind(dPP.deta * dl1,
- dl2)
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- dPP = array(c(dP1, dP2), c(n, 3, 2))
- dPP.deta = cbind(dtheta.deta(pA, link = .link , earg = .earg ),
- dtheta.deta(f, link = "identity"))
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- for(i1 in 1:M)
- for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
- dPP[,,i2,drop = TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
- }
- c(w) * wz
- }), list( .link = link, .earg = earg))))
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("AAaa.nohw", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pA = eta2theta(eta[, 1], link = .link , earg = .earg )
+ f = eta2theta(eta[, 2], link = "identity")
+ dP1 = cbind(f + 2*pA*(1-f),
+ 2*(1-f)*(1-2*pA),
+ -2*(1-pA) +f*(1-2*pA))
+ dP2 = cbind(pA*(1-pA),
+ -2*pA*(1-pA),
+ pA*(1-pA))
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+
+ dPP.deta = dtheta.deta(pA, link = .link , earg = .earg )
+
+ c(w) * cbind(dPP.deta * dl1,
+ dl2)
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ dPP = array(c(dP1, dP2), c(n, 3, 2))
+ dPP.deta = cbind(dtheta.deta(pA, link = .link , earg = .earg ),
+ dtheta.deta(f, link = "identity"))
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index = iam(i1,i2, M)
+ wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
+ }
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
- AB.Ab.aB.ab2 = function(link = "logit", earg = list(), init.p = NULL)
+ AB.Ab.aB.ab2 <- function(link = "logit", init.p = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("AB-Ab-aB-ab2 phenotype\n\n",
- "Links: ",
- namesof("p", link, earg = earg)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
- predictors.names = namesof("p", .link , earg = .earg , tag = FALSE)
+ new("vglmff",
+ blurb = c("AB-Ab-aB-ab2 phenotype\n\n",
+ "Links: ",
+ namesof("p", link, earg = earg)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
+ predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
if (length(mustart.orig))
mustart = mustart.orig
@@ -296,46 +304,47 @@
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("AB.Ab.aB.ab2", "vgenetic"),
- deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link , earg = .earg )
- dP1 = cbind(-0.5*(1-pp),
- 0.5*(1-pp),
- 0.5*(1-pp),
- -0.5*(1-pp))
- dl1 = rowSums(y * dP1 / mu)
- dPP.deta = dtheta.deta(pp, link = .link , earg = .earg )
- c(w) * dPP.deta * dl1
- }), list( .link = link, .earg = earg) )),
- weight = eval(substitute(expression({
- wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
- c(w) * wz
- }), list( .link = link, .earg = earg) )))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("AB.Ab.aB.ab2", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, link = .link , earg = .earg )
+ dP1 = cbind(-0.5*(1-pp),
+ 0.5*(1-pp),
+ 0.5*(1-pp),
+ -0.5*(1-pp))
+ dl1 = rowSums(y * dP1 / mu)
+ dPP.deta = dtheta.deta(pp, link = .link , earg = .earg )
+ c(w) * dPP.deta * dl1
+ }), list( .link = link, .earg = earg) )),
+ weight = eval(substitute(expression({
+ wz = rowSums(dP1 * dP1 / mu) * dPP.deta^2
+ c(w) * wz
+ }), list( .link = link, .earg = earg) )))
}
- A1A2A3 = function(link = "logit", earg = list(), ip1 = NULL, ip2 = NULL)
+ A1A2A3 <- function(link = "logit", ip1 = NULL, ip2 = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("A1A2A3 Allele System ",
- "(A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
- "Links: ",
- namesof("p1", link, earg = earg), ", ",
- namesof("p2", link, earg = earg, tag = FALSE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ new("vglmff",
+ blurb = c("A1A2A3 Allele System ",
+ "(A1A1, A1A2, A2A2, A1A3, A2A3, A3A3)\n\n",
+ "Links: ",
+ namesof("p1", link, earg = earg), ", ",
+ namesof("p2", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
if (length(mustart.orig))
mustart = mustart.orig
@@ -349,7 +358,7 @@
"c('A1A1','A1A2','A2A2','A1A3','A2A3','A3A3')")
}
- predictors.names =
+ predictors.names <-
c(namesof("pA", .link , earg = .earg , tag = FALSE),
namesof("pB", .link , earg = .earg , tag = FALSE))
@@ -361,19 +370,19 @@
etastart = cbind(theta2eta(p1, .link , earg = .earg ),
theta2eta(p2, .link , earg = .earg ))
mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- qq = abs(1 - p1 - p2)
- cbind(A1A1 = p1*p1,
- A1A2 = 2*p1*p2,
- A2A2 = p2*p2,
- A1A3 = 2*p1*qq,
- A2A3 = 2*p2*qq,
- A3A3 = qq*qq)
- }, list( .link = link, .earg = earg))),
+ }
+ }), list( .link = link, .ip1 = ip1, .ip2 = ip2, .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+ qq = abs(1 - p1 - p2)
+ cbind(A1A1 = p1*p1,
+ A1A2 = 2*p1*p2,
+ A2A2 = p2*p2,
+ A1A3 = 2*p1*qq,
+ A2A3 = 2*p2*qq,
+ A3A3 = qq*qq)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c(p1 = .link , p2 = .link )
@@ -382,58 +391,63 @@
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("A1A2A3", "vgenetic"),
- deriv = eval(substitute(expression({
- p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
- p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
- dl.dp1 = (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
- dl.dp2 = (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
- dp1.deta = dtheta.deta(p1, link = .link , earg = .earg )
- dp2.deta = dtheta.deta(p2, link = .link , earg = .earg )
- c(w) * cbind(dl.dp1 * dp1.deta,
- dl.dp2 * dp2.deta)
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- qq = 1-p1-p2
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
- ed2l.dp12 = 2 * (1/p1 + 1/qq)
- ed2l.dp22 = 2 * (1/p2 + 1/qq)
- ed2l.dp1dp2 = 2 / qq
- wz[, iam(1, 1, M)] = dp1.deta^2 * ed2l.dp12
- wz[, iam(2, 2, M)] = dp2.deta^2 * ed2l.dp22
- wz[, iam(1, 2, M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
- c(w) * wz
- }), list( .link = link, .earg = earg))))
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("A1A2A3", "vgenetic"),
+ deriv = eval(substitute(expression({
+ p1 = eta2theta(eta[, 1], link = .link , earg = .earg )
+ p2 = eta2theta(eta[, 2], link = .link , earg = .earg )
+
+ dl.dp1 = (2*y[, 1]+y[, 2]+y[, 4])/p1 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+ dl.dp2 = (2*y[, 3]+y[, 2]+y[,5])/p2 - (2*y[,6]+y[, 4]+y[,5])/(1-p1-p2)
+
+ dp1.deta = dtheta.deta(p1, link = .link , earg = .earg )
+ dp2.deta = dtheta.deta(p2, link = .link , earg = .earg )
+
+ c(w) * cbind(dl.dp1 * dp1.deta,
+ dl.dp2 * dp2.deta)
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ qq = 1-p1-p2
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==3 because M==2
+ ed2l.dp12 = 2 * (1/p1 + 1/qq)
+ ed2l.dp22 = 2 * (1/p2 + 1/qq)
+ ed2l.dp1dp2 = 2 / qq
+ wz[, iam(1, 1, M)] = dp1.deta^2 * ed2l.dp12
+ wz[, iam(2, 2, M)] = dp2.deta^2 * ed2l.dp22
+ wz[, iam(1, 2, M)] = ed2l.dp1dp2 * dp1.deta * dp2.deta
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
- MNSs = function(link = "logit", earg = list(),
- imS = NULL, ims = NULL, inS = NULL)
+ MNSs <- function(link = "logit",
+ imS = NULL, ims = NULL, inS = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
- "Links: ",
- namesof("mS", link, earg = earg), ", ",
- namesof("ms", link, earg = earg), ", ",
- namesof("nS", link, earg = earg, tag = FALSE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ new("vglmff",
+ blurb = c("MNSs Blood Group System (MS-Ms-MNS-MNs-NS-Ns phenotype)\n\n",
+ "Links: ",
+ namesof("mS", link, earg = earg), ", ",
+ namesof("ms", link, earg = earg), ", ",
+ namesof("nS", link, earg = earg, tag = FALSE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
if (length(mustart.orig))
mustart = mustart.orig
@@ -441,47 +455,48 @@
ok.col.ny = c("MS","Ms","MNS","MNs","NS","Ns")
if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have ",
- "names (output of colnames()) ordered as ",
- "c('MS','Ms','MNS','MNs','NS','Ns')")
- }
-
- predictors.names <-
- c(namesof("mS", .link , earg = .earg , tag = FALSE),
- namesof("ms", .link , earg = .earg , tag = FALSE),
- namesof("nS", .link , earg = .earg , tag = FALSE))
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
+ "c('MS','Ms','MNS','MNs','NS','Ns')")
+ }
- if (is.null(etastart)) {
- ms = if (is.numeric(.ims)) rep(.ims, n) else
- c(sqrt(mustart[, 2]))
- ns = c(sqrt(mustart[,6]))
- nS = if (is.numeric(.inS)) rep(.inS, n) else
- c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
- mS = if (is.numeric(.imS)) rep(.imS, n) else
- 1-ns-ms-nS
- etastart = cbind(theta2eta(mS, .link , earg = .earg ),
- theta2eta(ms, .link , earg = .earg ),
- theta2eta(nS, .link , earg = .earg ))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mS = eta2theta(eta[, 1], link = .link , earg = .earg )
- ms = eta2theta(eta[, 2], link = .link , earg = .earg )
- nS = eta2theta(eta[, 3], link = .link , earg = .earg )
- ns = abs(1 - mS - ms - nS)
- cbind(MS = mS^2 + 2*mS*ms,
- Ms = ms^2,
- MNS = 2*(mS*nS + ms*nS + mS*ns),
- MNs = 2*ms*ns,
- NS = nS^2 + 2*nS*ns,
- Ns = ns^2)
- }, list( .link = link, .earg = earg))),
+ predictors.names <-
+ c(namesof("mS", .link , earg = .earg , tag = FALSE),
+ namesof("ms", .link , earg = .earg , tag = FALSE),
+ namesof("nS", .link , earg = .earg , tag = FALSE))
+
+ if (is.null(etastart)) {
+ ms = if (is.numeric(.ims)) rep(.ims, n) else
+ c(sqrt(mustart[, 2]))
+ ns = c(sqrt(mustart[,6]))
+ nS = if (is.numeric(.inS)) rep(.inS, n) else
+ c(-ns + sqrt(ns^2 + mustart[,5])) # Solve a quadratic eqn
+ mS = if (is.numeric(.imS)) rep(.imS, n) else
+ 1-ns-ms-nS
+ etastart = cbind(theta2eta(mS, .link , earg = .earg ),
+ theta2eta(ms, .link , earg = .earg ),
+ theta2eta(nS, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .link = link, .imS = imS, .ims = ims, .inS = inS, .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mS = eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms = eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS = eta2theta(eta[, 3], link = .link , earg = .earg )
+ ns = abs(1 - mS - ms - nS)
+ cbind(MS = mS^2 + 2*mS*ms,
+ Ms = ms^2,
+ MNS = 2*(mS*nS + ms*nS + mS*ns),
+ MNs = 2*ms*ns,
+ NS = nS^2 + 2*nS*ns,
+ Ns = ns^2)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c(mS = .link , ms = .link , nS = .link )
misc$earg = list(mS = .earg , ms = .earg , nS = .earg )
+
misc$expected = TRUE
}), list( .link = link, .earg = earg))),
@@ -489,36 +504,36 @@
loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("MNSs", "vgenetic"),
- deriv = eval(substitute(expression({
- mS = eta2theta(eta[, 1], link = .link , earg = .earg )
- ms = eta2theta(eta[, 2], link = .link , earg = .earg )
- nS = eta2theta(eta[, 3], link = .link , earg = .earg )
- ns = 1-mS-ms-nS
- dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
- dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
- dP3 = cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
- dl1 = rowSums(y * dP1 / mu)
- dl2 = rowSums(y * dP2 / mu)
- dl3 = rowSums(y * dP3 / mu)
- dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link , earg = .earg )
- c(w) * dPP.deta * cbind(dl1, dl2, dl3)
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
- wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
- for(i1 in 1:M)
- for(i2 in i1:M) {
- index = iam(i1,i2, M)
- wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
- dPP[,,i2,drop = TRUE] / mu) *
- dPP.deta[,i1] * dPP.deta[,i2]
- }
- c(w) * wz
- }), list( .link = link, .earg = earg))))
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("MNSs", "vgenetic"),
+ deriv = eval(substitute(expression({
+ mS = eta2theta(eta[, 1], link = .link , earg = .earg )
+ ms = eta2theta(eta[, 2], link = .link , earg = .earg )
+ nS = eta2theta(eta[, 3], link = .link , earg = .earg )
+ ns = 1-mS-ms-nS
+ dP1 = cbind(2*(mS+ms), 0, 2*(nS+ns-mS), -2*ms, -2*nS, -2*ns)
+ dP2 = cbind(2*mS, 2*ms, 2*(nS-mS), 2*(ns-ms), -2*nS, -2*ns)
+ dP3 = cbind(0, 0, 2*ms, -2*ms, 2*ns, -2*ns) # n x 6
+ dl1 = rowSums(y * dP1 / mu)
+ dl2 = rowSums(y * dP2 / mu)
+ dl3 = rowSums(y * dP3 / mu)
+ dPP.deta = dtheta.deta(cbind(mS, ms, nS), link = .link , earg = .earg )
+ c(w) * dPP.deta * cbind(dl1, dl2, dl3)
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ dPP = array(c(dP1,dP2,dP3), c(n,6, 3))
+ wz = matrix(as.numeric(NA), n, dimm(M)) # dimm(M)==6 because M==3
+ for(i1 in 1:M)
+ for(i2 in i1:M) {
+ index = iam(i1,i2, M)
+ wz[,index] = rowSums(dPP[,,i1,drop = TRUE] *
+ dPP[,,i2,drop = TRUE] / mu) *
+ dPP.deta[,i1] * dPP.deta[,i2]
+ }
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
@@ -526,11 +541,12 @@
- ABO = function(link = "logit", earg = list(), ipA = NULL, ipO = NULL)
+ ABO <- function(link = "logit", ipA = NULL, ipO = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
new("vglmff",
blurb = c("ABO Blood Group System (A-B-AB-O phenotype)\n\n",
@@ -591,7 +607,8 @@
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL)
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
sum(dmultinomial(x = w * y, size = w, prob = mu, log = TRUE,
@@ -639,51 +656,52 @@
- AB.Ab.aB.ab = function(link = "logit", earg = list(), init.p = NULL)
+ AB.Ab.aB.ab <- function(link = "logit", init.p = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("AB-Ab-aB-ab phenotype\n\n",
- "Links: ", namesof("p", link, earg = earg, tag = TRUE)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ new("vglmff",
+ blurb = c("AB-Ab-aB-ab phenotype\n\n",
+ "Links: ", namesof("p", link, earg = earg, tag = TRUE)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
if (length(mustart.orig))
mustart = mustart.orig
- ok.col.ny = c("AB","Ab","aB","ab")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix should have ",
- "names (output of colnames()) ordered as ",
- "c('AB','Ab','aB','ab')")
- }
+ ok.col.ny = c("AB","Ab","aB","ab")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix should have ",
+ "names (output of colnames()) ordered as ",
+ "c('AB','Ab','aB','ab')")
+ }
- predictors.names = namesof("p", .link , earg = .earg , tag = FALSE)
+ predictors.names <- namesof("p", .link , earg = .earg , tag = FALSE)
- if (is.null(etastart)) {
- p = if (is.numeric( .init.p )) rep(.init.p, len = n) else
- c(sqrt(4 * mustart[, 4]))
- etastart = cbind(theta2eta(p, .link , earg = .earg ))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .init.p=init.p, .earg = earg))),
- linkinv = eval(substitute(function(eta,extra = NULL) {
- p = eta2theta(eta, link = .link , earg = .earg )
- pp4 = p * p / 4
- cbind(AB = 0.5 + pp4,
- Ab = 0.25 - pp4,
- aB = 0.25 - pp4,
- ab = pp4)
- }, list( .link = link, .earg = earg))),
+ if (is.null(etastart)) {
+ p = if (is.numeric( .init.p )) rep(.init.p, len = n) else
+ c(sqrt(4 * mustart[, 4]))
+ etastart = cbind(theta2eta(p, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .link = link, .init.p=init.p, .earg = earg))),
+ linkinv = eval(substitute(function(eta,extra = NULL) {
+ p = eta2theta(eta, link = .link , earg = .earg )
+ pp4 = p * p / 4
+ cbind(AB = 0.5 + pp4,
+ Ab = 0.25 - pp4,
+ aB = 0.25 - pp4,
+ ab = pp4)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c(p = .link )
@@ -692,77 +710,82 @@
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("AB.Ab.aB.ab", "vgenetic"),
- deriv = eval(substitute(expression({
- pp = eta2theta(eta, link = .link , earg = .earg )
- p2 = pp*pp
- nAB = w * y[, 1]
- nAb = w * y[, 2]
- naB = w * y[, 3]
- nab = w * y[, 4]
- dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
- dp.deta = dtheta.deta(pp, link = .link , earg = .earg )
- dl.dp * dp.deta
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- ed2l.dp2 = 4 * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
- wz = cbind((dp.deta^2) * ed2l.dp2)
- c(w) * wz
- }), list( .link = link, .earg = earg))))
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("AB.Ab.aB.ab", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, link = .link , earg = .earg )
+
+ p2 = pp*pp
+ nAB = w * y[, 1]
+ nAb = w * y[, 2]
+ naB = w * y[, 3]
+ nab = w * y[, 4]
+
+ dl.dp = 8 * pp * (nAB/(2+p2) - (nAb+naB)/(1-p2) + nab/p2)
+
+ dp.deta = dtheta.deta(pp, link = .link , earg = .earg )
+
+ dl.dp * dp.deta
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ ed2l.dp2 = 4 * p2 * (1/(2+p2) + 2/(1-p2) + 1/p2)
+ wz = cbind((dp.deta^2) * ed2l.dp2)
+ c(w) * wz
+ }), list( .link = link, .earg = earg))))
}
- AA.Aa.aa = function(link = "logit", earg = list(), init.pA = NULL)
+ AA.Aa.aa <- function(link = "logit", init.pA = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
- new("vglmff",
- blurb = c("AA-Aa-aa phenotype\n\n",
- "Links: ", namesof("pA", link, earg = earg)),
- deviance = Deviance.categorical.data.vgam,
- initialize = eval(substitute(expression({
- mustart.orig = mustart
+ new("vglmff",
+ blurb = c("AA-Aa-aa phenotype\n\n",
+ "Links: ", namesof("pA", link, earg = earg)),
+ deviance = Deviance.categorical.data.vgam,
+ initialize = eval(substitute(expression({
+ mustart.orig = mustart
- delete.zero.colns = FALSE
- eval(process.categorical.data.vgam)
+ delete.zero.colns = FALSE
+ eval(process.categorical.data.vgam)
if (length(mustart.orig))
mustart = mustart.orig
- ok.col.ny = c("AA","Aa","aa")
- if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
- setequal(ok.col.ny, col.ny)) {
- if (!all(ok.col.ny == col.ny))
- stop("the columns of the response matrix ",
- "should have names ",
- "(output of colnames()) ordered as c('AA','Aa','aa')")
- }
+ ok.col.ny = c("AA","Aa","aa")
+ if (length(col.ny <- colnames(y)) == length(ok.col.ny) &&
+ setequal(ok.col.ny, col.ny)) {
+ if (!all(ok.col.ny == col.ny))
+ stop("the columns of the response matrix ",
+ "should have names ",
+ "(output of colnames()) ordered as c('AA','Aa','aa')")
+ }
- predictors.names = namesof("pA", .link , earg = .earg , tag = FALSE)
+ predictors.names <- namesof("pA", .link , earg = .earg , tag = FALSE)
- if (is.null(etastart)) {
- pA = if (is.numeric(.init.pA)) rep(.init.pA, n) else
- c(sqrt(mustart[, 1]))
- etastart = cbind(theta2eta(pA, .link , earg = .earg ))
- mustart <- NULL # Since etastart has been computed.
- }
- }), list( .link = link, .init.pA=init.pA, .earg = earg))),
- linkinv = eval(substitute(function(eta,extra = NULL) {
- pA = eta2theta(eta, link = .link , earg = .earg )
- pp = pA*pA
- cbind(AA = pp,
- Aa = 2*pA*(1-pA),
- aa = (1-pA)^2)
- }, list( .link = link, .earg = earg))),
+ if (is.null(etastart)) {
+ pA = if (is.numeric(.init.pA)) rep(.init.pA, n) else
+ c(sqrt(mustart[, 1]))
+ etastart = cbind(theta2eta(pA, .link , earg = .earg ))
+ mustart <- NULL # Since etastart has been computed.
+ }
+ }), list( .link = link, .init.pA=init.pA, .earg = earg))),
+ linkinv = eval(substitute(function(eta,extra = NULL) {
+ pA = eta2theta(eta, link = .link , earg = .earg )
+ pp = pA*pA
+ cbind(AA = pp,
+ Aa = 2*pA*(1-pA),
+ aa = (1-pA)^2)
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
misc$link = c("pA" = .link )
@@ -771,27 +794,27 @@
}), list( .link = link, .earg = earg))),
- loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else {
- sum(dmultinomial(x = w * y, size = w, prob = mu,
- log = TRUE, dochecking = FALSE))
- },
- vfamily = c("AA.Aa.aa", "vgenetic"),
- deriv = eval(substitute(expression({
- pA = eta2theta(eta, link = .link , earg = .earg )
- nAA = w * y[, 1]
- nAa = w * y[, 2]
- naa = w * y[, 3]
- dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
- dpA.deta = dtheta.deta(pA, link = .link , earg = .earg )
- dl.dpA * dpA.deta
- }), list( .link = link, .earg = earg))),
- weight = eval(substitute(expression({
- d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
- wz = cbind((dpA.deta^2) * d2l.dp2)
- wz
- }), list( .link = link, .earg = earg))))
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else {
+ sum(dmultinomial(x = w * y, size = w, prob = mu,
+ log = TRUE, dochecking = FALSE))
+ },
+ vfamily = c("AA.Aa.aa", "vgenetic"),
+ deriv = eval(substitute(expression({
+ pA = eta2theta(eta, link = .link , earg = .earg )
+ nAA = w * y[, 1]
+ nAa = w * y[, 2]
+ naa = w * y[, 3]
+ dl.dpA = (2*nAA+nAa)/pA - (nAa+2*naa)/(1-pA)
+ dpA.deta = dtheta.deta(pA, link = .link , earg = .earg )
+ dl.dpA * dpA.deta
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ d2l.dp2 = (2*nAA+nAa)/pA^2 + (nAa+2*naa)/(1-pA)^2
+ wz = cbind((dpA.deta^2) * d2l.dp2)
+ wz
+ }), list( .link = link, .earg = earg))))
}
diff --git a/R/family.glmgam.R b/R/family.glmgam.R
index 7be170b..a3168e0 100644
--- a/R/family.glmgam.R
+++ b/R/family.glmgam.R
@@ -13,34 +13,49 @@
- binomialff = function(link = "logit", earg = list(),
- dispersion = 1, mv = FALSE, onedpar = !mv,
- parallel = FALSE, zero = NULL)
+ binomialff <- function(link = "logit",
+ dispersion = 1, mv = FALSE, onedpar = !mv,
+ parallel = FALSE, zero = NULL,
+ bred = FALSE,
+ earg.link = FALSE)
{
+ if (bred)
+ stop("currently 'bred = TRUE' is not working")
+
estimated.dispersion <- dispersion == 0
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- ans =
+
+
+
+
+ if (earg.link) {
+ earg <- link
+ } else {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ }
+ link <- attr(earg, "function.name")
+
+
+ ans <-
new("vglmff",
blurb = if (mv) c("Multivariate binomial model\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg), "\n",
"Variance: mu[,j]*(1-mu[,j])") else
c("Binomial model\n\n",
"Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu*(1-mu)"),
+ "Variance: mu * (1 - mu)"),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 1,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
@@ -55,18 +70,32 @@
if ( .mv ) {
- y = as.matrix(y)
- M = ncol(y)
- if (!all(y == 0 | y == 1))
- stop("response must contain 0's and 1's only")
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- paste("mu", 1:M, sep = "")
- }
- predictors.names = namesof(if (M > 1) dn2 else
- "mu", .link, earg = .earg, short = TRUE)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ M = ncol(y)
+ if (!all(y == 0 | y == 1))
+ stop("response must contain 0's and 1's only")
+
+
+
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ paste("mu", 1:M, sep = "")
+ }
+ predictors.names <-
+ namesof(if (M > 1) dn2 else
+ "mu", .link , earg = .earg , short = TRUE)
if (!length(mustart) && !length(etastart))
mustart = matrix(colMeans(y), nrow = nrow(y), ncol = ncol(y),
@@ -79,28 +108,28 @@
} else {
- if (!all(w == 1))
- extra$orig.w = w
+ if (!all(w == 1))
+ extra$orig.w = w
- NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
+ NCOL = function (x) if (is.array(x) && length(dim(x)) > 1 ||
is.data.frame(x)) ncol(x) else as.integer(1)
- if (NCOL(y) == 1) {
- if (is.factor(y)) y = (y != levels(y)[1])
- nvec = rep(1, n)
- y[w == 0] <- 0
- if (!all(y == 0 || y == 1))
- stop("response values 'y' must be 0 or 1")
- if (!length(mustart) && !length(etastart))
- mustart = (0.5 + w * y) / (1 + w)
+ if (NCOL(y) == 1) {
+ if (is.factor(y)) y = (y != levels(y)[1])
+ nvec = rep(1, n)
+ y[w == 0] <- 0
+ if (!all(y == 0 || y == 1))
+ stop("response values 'y' must be 0 or 1")
+ if (!length(mustart) && !length(etastart))
+ mustart = (0.5 + w * y) / (1 + w)
- no.successes = y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+ } else if (NCOL(y) == 2) {
if (min(y) < 0)
stop("Negative data not allowed!")
if (any(abs(y - round(y)) > 1.0e-8))
@@ -118,12 +147,13 @@
"or a 2-column matrix where col 1 is the no. of ",
"successes and col 2 is the no. of failures")
}
- predictors.names = namesof("mu", .link, earg = .earg, short = TRUE)
+ predictors.names <-
+ namesof("mu", .link , earg = .earg , short = TRUE)
}
- }), list( .link = link, .mv = mv, .earg = earg ))),
+ }), list( .link = link, .mv = mv, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
+ mu <- eta2theta(eta, link = .link , earg = .earg )
mu
}, list( .link = link, .earg = earg ))),
@@ -135,39 +165,42 @@
dpar <- .dispersion
if (!dpar) {
- temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link = .link,
+ temp87 = (y-mu)^2 * wz / (dtheta.deta(mu, link = .link ,
earg = .earg )^2) # w cancel
- if (.mv && ! .onedpar) {
- dpar = rep(as.numeric(NA), len = M)
- temp87 = cbind(temp87)
- nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
- for(ii in 1:M)
- dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
- if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
- names(dpar) = dimnames(y)[[2]]
- } else
+ if (.mv && ! .onedpar) {
+ dpar = rep(as.numeric(NA), len = M)
+ temp87 = cbind(temp87)
+ nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
+ for(ii in 1:M)
+ dpar[ii] = sum(temp87[, ii]) / (nrow.mu - ncol(x))
+ if (is.matrix(y) && length(dimnames(y)[[2]]) == length(dpar))
+ names(dpar) = dimnames(y)[[2]]
+ } else
dpar = sum(temp87) / (length(mu) - ncol(x))
- }
- misc$mv = .mv
- misc$dispersion <- dpar
- misc$default.dispersion <- 1
- misc$estimated.dispersion <- .estimated.dispersion
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
-
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
-
- misc$expected = TRUE
+ }
+ misc$mv = .mv
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 1
+ misc$estimated.dispersion <- .estimated.dispersion
+ misc$bred <- .bred
+
+ misc$link = rep( .link , length = M)
+ names(misc$link) = if (M > 1) dn2 else "mu"
+
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:M) misc$earg[[ii]] = .earg
+
+ misc$expected = TRUE
}), list( .dispersion = dispersion,
.estimated.dispersion = estimated.dispersion,
.onedpar = onedpar, .mv = mv,
- .link = link, .earg = earg ))),
+ .bred = bred,
+ .link = link, .earg = earg))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, .link, earg = .earg )
- }, list( .link = link, .earg = earg ))),
+ theta2eta(mu, .link , earg = .earg )
+ }, list( .link = link, .earg = earg))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
@@ -200,38 +233,59 @@
vfamily = c("binomialff", "vcategorical"),
deriv = eval(substitute(expression({
+ ybred <- if ( .bred ) {
+ adjustment <- Hvector <-
+ hatvaluesbasic(X_vlm = X_vlm_save,
+ diagWm = (c(w) * c(mu * (1 - mu)))^1)
+
+
+ y + Hvector * (0.5 - pi)
+ y + (c(w) * (mu * (1 - mu))) * Hvector * (0.5 - pi)
+ y + (1 / c(w)) * Hvector * (0.5 - pi) # close
+ y + (c(mu * (1 - mu)) / c(w)) * Hvector * (0.5 - pi) # closer
+ y + (c(mu * (1 - mu)) / c(w)) * Hvector * (0.5 - pi) / 2 # closest
+ y + (1 / c(w)) * Hvector * (0.5 - pi) / 2 #
+ } else {
+ y
+ }
+
+ answer <-
if ( .link == "logit") {
- w * (y - mu)
+ c(w) * (ybred - mu)
} else if ( .link == "cloglog") {
- mu.use = mu
- smallno = 100 * .Machine$double.eps
- mu.use[mu.use < smallno] = smallno
- mu.use[mu.use > 1.0 - smallno] = 1.0 - smallno
- -w * (y - mu) * log1p(-mu.use) / mu.use
- } else
- w * dtheta.deta(mu, link = .link, earg = .earg ) *
- (y / mu - 1.0) / (1.0 - mu)
- }), list( .link = link, .earg = earg ))),
+ mu.use = mu
+ smallno = 100 * .Machine$double.eps
+ mu.use[mu.use < smallno] = smallno
+ mu.use[mu.use > 1.0 - smallno] = 1.0 - smallno
+ -c(w) * (ybred - mu) * log1p(-mu.use) / mu.use
+ } else {
+ c(w) * dtheta.deta(mu, link = .link , earg = .earg ) *
+ (ybred / mu - 1.0) / (1.0 - mu)
+ }
+
+ answer
+ }), list( .link = link, .earg = earg, .bred = bred))),
weight = eval(substitute(expression({
tmp100 = mu * (1.0 - mu)
tmp200 = if ( .link == "logit") {
- cbind(w * tmp100)
+ cbind(c(w) * tmp100)
} else if ( .link == "cloglog") {
- cbind(w * (1.0 - mu.use) * (log1p(-mu.use))^2 / mu.use)
+ cbind(c(w) * (1.0 - mu.use) * (log1p(-mu.use))^2 / mu.use)
} else {
- cbind(w * dtheta.deta(mu, link = .link, earg = .earg)^2 / tmp100)
+ cbind(c(w) * dtheta.deta(mu, link = .link ,
+ earg = .earg )^2 / tmp100)
}
for(ii in 1:M) {
- index500 = !is.finite(tmp200[, ii]) |
- (abs(tmp200[, ii]) < .Machine$double.eps)
- if (any(index500)) { # Diagonal 0's are bad
- tmp200[index500, ii] = .Machine$double.eps
- }
+ index500 = !is.finite(tmp200[, ii]) |
+ (abs(tmp200[, ii]) < .Machine$double.eps)
+ if (any(index500)) { # Diagonal 0's are bad
+ tmp200[index500, ii] = .Machine$double.eps
+ }
}
tmp200
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
@@ -248,159 +302,232 @@
- gammaff = function(link = "nreciprocal", earg = list(), dispersion=0)
+ gammaff <- function(link = "nreciprocal", dispersion = 0)
{
- estimated.dispersion <- dispersion == 0
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ estimated.dispersion <- dispersion == 0
- new("vglmff",
- blurb = c("Gamma distribution\n\n",
- "Link: ", namesof("mu", link, earg =earg), "\n",
- "Variance: mu^2 / k"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
- if (residuals) {
- sign(y - mu) * sqrt(abs(devi) * w)
- } else sum(w * devi)
- },
- initialize = eval(substitute(expression({
- mustart <- y + 0.167 * (y == 0)
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- paste("mu", 1:M, sep = "")
- }
- predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
- earg =.earg, short = TRUE)
- if (!length(etastart))
- etastart <- theta2eta(mustart, link = .link, earg =.earg)
- }), list( .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, link = .link, earg =.earg)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- dpar <- .dispersion
- if (!dpar) {
- if (M == 1) {
- temp = w * dmu.deta^2
- dpar = sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
- } else {
- dpar = rep(0, len = M)
- for(spp in 1:M) {
- temp = w * dmu.deta[,spp]^2
- dpar[spp] = sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) /
- (length(mu[,spp]) - ncol(x))
- }
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Gamma distribution\n\n",
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu^2 / k"),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ devi <- -2 * w * (log(ifelse(y == 0, 1, y/mu)) - (y - mu)/mu)
+ if (residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ dispersion = .dispersion )
+ }, list( .dispersion = dispersion ))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ mustart <- y + 0.167 * (y == 0)
+
+ M = if (is.matrix(y)) ncol(y) else 1
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ paste("mu", 1:M, sep = "")
+ }
+
+ predictors.names <-
+ namesof(if (M > 1) dn2 else "mu", .link ,
+ earg = .earg , short = TRUE)
+
+ if (!length(etastart))
+ etastart <- theta2eta(mustart, link = .link , earg = .earg )
+ }), list( .link = link, .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg))),
+ last = eval(substitute(expression({
+ dpar <- .dispersion
+ if (!dpar) {
+ if (M == 1) {
+ temp = w * dmu.deta^2
+ dpar = sum(w * (y-mu)^2 * wz / temp) / (length(mu) - ncol(x))
+ } else {
+ dpar = rep(0, len = M)
+ for(spp in 1:M) {
+ temp = w * dmu.deta[,spp]^2
+ dpar[spp] = sum(w * (y[,spp]-mu[,spp])^2 * wz[,spp]/temp) /
+ (length(mu[,spp]) - ncol(x))
}
}
- misc$dispersion <- dpar
- misc$default.dispersion <- 0
- misc$estimated.dispersion <- .estimated.dispersion
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
+ misc$link = rep( .link , length = M)
+ names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- misc$expected = TRUE
- }), list( .dispersion = dispersion, .earg = earg,
- .estimated.dispersion = estimated.dispersion,
- .link = link ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg =.earg)
- }, list( .link = link, .earg = earg ))),
- vfamily = "gammaff",
- deriv = eval(substitute(expression({
- dl.dmu = (y-mu) / mu^2
- dmu.deta = dtheta.deta(theta = mu, link = .link, earg =.earg)
- w * dl.dmu * dmu.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- d2l.dmu2 = 1 / mu^2
- w * dmu.deta^2 * d2l.dmu2
- }), list( .link = link, .earg = earg ))))
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:M) misc$earg[[ii]] = .earg
+
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .dispersion = dispersion, .earg = earg,
+ .estimated.dispersion = estimated.dispersion,
+ .link = link ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg))),
+ vfamily = "gammaff",
+ deriv = eval(substitute(expression({
+ Musual <- 1
+ ncoly <- ncol(as.matrix(y))
+
+ dl.dmu = (y-mu) / mu^2
+ dmu.deta = dtheta.deta(theta = mu, link = .link , earg = .earg )
+ c(w) * dl.dmu * dmu.deta
+ }), list( .link = link, .earg = earg))),
+ weight = eval(substitute(expression({
+ d2l.dmu2 = 1 / mu^2
+ wz <- dmu.deta^2 * d2l.dmu2
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ }), list( .link = link, .earg = earg))))
}
- inverse.gaussianff = function(link = "natural.ig", dispersion=0)
+
+ inverse.gaussianff <- function(link = "natural.ig",
+ dispersion = 0)
{
- estimated.dispersion <- dispersion==0
- warning("@deviance() not finished")
- warning("needs checking, but I'm sure it works")
+ estimated.dispersion <- dispersion == 0
+ warning("@deviance() not finished")
+ warning("needs checking, but I'm sure it works")
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("Inverse Gaussian distribution\n\n",
- "Link: ", namesof("mu", link), "\n",
- "Variance: mu^3 /k"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- pow <- 3 # Use Quasi()$deviance with pow==3
- devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
- devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
- devi <- 2 * (devy - devmu)
- if (residuals) {
- sign(y - mu) * sqrt(abs(devi) * w)
- } else sum(w * devi)
- },
- initialize = eval(substitute(expression({
- mu <- y + 0.167 * (y == 0)
- if (!length(etastart))
- etastart <- theta2eta(mu, link = .link)
- }), list( .link = link ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, link = .link)
- }, list( .link = link ))),
- last = eval(substitute(expression({
- dpar <- .dispersion
- if (!dpar) {
- temp <- w * dmu.deta^2
- dpar <- sum( w * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
- }
- misc$dispersion <- dpar
- misc$default.dispersion <- 0
- misc$estimated.dispersion <- .estimated.dispersion
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
- }), list( .dispersion = dispersion,
- .estimated.dispersion = estimated.dispersion,
- .link = link ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link)
- }, list( .link = link ))),
- vfamily = "inverse.gaussianff",
- deriv = eval(substitute(expression({
- dl.dmu <- (y-mu) / mu^3
- dmu.deta <- dtheta.deta(theta = mu, link = .link)
- w * dl.dmu * dmu.deta
- }), list( .link = link ))),
- weight = eval(substitute(expression({
- d2l.dmu2 <- 1 / mu^3
- w * dmu.deta^2 * d2l.dmu2
- }), list( .link = link ))))
+
+ new("vglmff",
+ blurb = c("Inverse Gaussian distribution\n\n",
+ "Link: ", namesof("mu", link, earg = earg), "\n",
+ "Variance: mu^3 / k"),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ pow <- 3 # Use Quasi()$deviance with pow==3
+ devy <- y^(2-pow) / (1-pow) - y^(2-pow) / (2-pow)
+ devmu <- y * mu^(1-pow) / (1-pow) - mu^(2-pow) / (2-pow)
+ devi <- 2 * (devy - devmu)
+ if (residuals) {
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else sum(w * devi)
+ },
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ dispersion = .dispersion )
+ }, list( .earg = earg , .dispersion = dispersion ))),
+ initialize = eval(substitute(expression({
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ mu <- y + 0.167 * (y == 0)
+
+
+
+ M = if (is.matrix(y)) ncol(y) else 1
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ paste("mu", 1:M, sep = "")
+ }
+
+ predictors.names <-
+ namesof(if (M > 1) dn2 else "mu", .link , .earg , short = TRUE)
+
+
+ if (!length(etastart))
+ etastart <- theta2eta(mu, link = .link , .earg )
+ }), list( .link = link, .earg = earg))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ dpar <- .dispersion
+ if (!dpar) {
+ temp <- w * dmu.deta^2
+ dpar <- sum( w * (y-mu)^2 * wz / temp ) / (length(mu) - ncol(x))
+ }
+ misc$dispersion <- dpar
+ misc$default.dispersion <- 0
+ misc$estimated.dispersion <- .estimated.dispersion
+
+ misc$link = rep( .link , length = M)
+ names(misc$link) = if (M > 1) paste("mu", 1:M, sep = "") else "mu"
+
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] = .earg
+
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .dispersion = dispersion,
+ .estimated.dispersion = estimated.dispersion,
+ .link = link, .earg = earg ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link, earg = .earg )
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "inverse.gaussianff",
+ deriv = eval(substitute(expression({
+ Musual <- 1
+ ncoly <- ncol(as.matrix(y))
+
+ dl.dmu <- (y - mu) / mu^3
+ dmu.deta <- dtheta.deta(theta = mu, link = .link , earg = .earg )
+ c(w) * dl.dmu * dmu.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ d2l.dmu2 <- 1 / mu^3
+ wz <- dmu.deta^2 * d2l.dmu2
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ }), list( .link = link, .earg = earg ))))
}
-dinv.gaussian = function(x, mu, lambda, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+dinv.gaussian <- function(x, mu, lambda, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(mu), length(lambda))
- x = rep(x, len = LLL);
- mu = rep(mu, len = LLL);
- lambda = rep(lambda, len = LLL)
- logdensity = rep(log(0), len = LLL)
+ x <- rep(x, len = LLL);
+ mu <- rep(mu, len = LLL);
+ lambda <- rep(lambda, len = LLL)
+ logdensity <- rep(log(0), len = LLL)
xok = (x > 0)
logdensity[xok] = 0.5 * log(lambda[xok] / (2 * pi * x[xok]^3)) -
@@ -412,7 +539,7 @@ dinv.gaussian = function(x, mu, lambda, log = FALSE) {
}
-pinv.gaussian = function(q, mu, lambda) {
+pinv.gaussian <- function(q, mu, lambda) {
if (any(mu <= 0))
stop("mu must be positive")
if (any(lambda <= 0))
@@ -422,7 +549,7 @@ pinv.gaussian = function(q, mu, lambda) {
q = rep(q, len = LLL)
mu = rep(mu, len = LLL)
lambda = rep(lambda, len = LLL)
- ans = q
+ ans <- q
ans[q <= 0] = 0
bb = q > 0
@@ -433,7 +560,7 @@ pinv.gaussian = function(q, mu, lambda) {
}
-rinv.gaussian = function(n, mu, lambda) {
+rinv.gaussian <- function(n, mu, lambda) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -445,7 +572,7 @@ rinv.gaussian = function(n, mu, lambda) {
Z = rnorm(use.n)^2 # rchisq(use.n, df = 1)
phi = lambda / mu
y1 = 1 - 0.5 * (sqrt(Z^2 + 4*phi*Z) - Z) / phi
- ans = mu * ifelse((1+y1)*u > 1, 1/y1, y1)
+ ans <- mu * ifelse((1+y1)*u > 1, 1/y1, y1)
ans[mu <= 0] = NaN
ans[lambda <= 0] = NaN
ans
@@ -461,23 +588,23 @@ rinv.gaussian = function(n, mu, lambda) {
- inv.gaussianff = function(lmu = "loge", llambda = "loge",
- emu = list(), elambda = list(),
- imethod = 1,
- ilambda = 1,
- shrinkage.init = 0.99,
- zero = NULL)
+ inv.gaussianff <- function(lmu = "loge", llambda = "loge",
+ imethod = 1, ilambda = NULL,
+ parallel = FALSE, intercept.apply = FALSE,
+ shrinkage.init = 0.99,
+ zero = NULL)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu <- as.character(substitute(lmu))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda <- as.character(substitute(llambda))
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- if (!is.list(emu)) emu = list()
- if (!is.list(elambda)) elambda = list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -488,47 +615,90 @@ rinv.gaussian = function(n, mu, lambda) {
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ if (is.logical(parallel) && parallel && length(zero))
+ stop("set 'zero = NULL' if 'parallel = TRUE'")
+
+
+
new("vglmff",
blurb = c("Inverse Gaussian distribution\n\n",
"f(y) = sqrt(lambda/(2*pi*y^3)) * ",
- "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu and lambda > 0",
+ "exp(-lambda * (y - mu)^2 / (2 * mu^2 * y)); y, mu & lambda > 0",
"Link: ", namesof("mu", lmu, earg = emu), ", ",
namesof("lambda", llambda, earg = elambda), "\n",
"Mean: ", "mu\n",
"Variance: mu^3 / lambda"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ intercept.apply = .intercept.apply )
+
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero,
+ .parallel = parallel, .intercept.apply = intercept.apply ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero )
+ }, list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+
+ mynames1 <- paste("mu", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .lmu , earg = .emu , short = TRUE),
+ namesof(mynames2, .llambda , earg = .elambda , short = TRUE))[
+ interleave.VGAM(M, M = Musual)]
+
- if (any(y <= 0))
- stop("Require the response to have positive values")
- predictors.names =
- c(namesof("mu", .lmu, earg = .emu, short = TRUE),
- namesof("lambda", .llambda, earg = .elambda, short = TRUE))
if (!length(etastart)) {
- init.mu =
- if ( .imethod == 3) {
- 0 * y + 1.1 * median(y) + 1/8
- } else if ( .imethod == 2) {
- use.this = weighted.mean(y, w)
+ init.mu <-
+ if ( .imethod == 2) {
+ mediany <- apply(y, 2, median)
+ matrix(1.1 * mediany + 1/8, n, ncoly, byrow = TRUE)
+ } else if ( .imethod == 3) {
+ use.this <- colSums(y * w) / colSums(w) # weighted.mean(y, w)
(1 - .sinit) * y + .sinit * use.this
} else {
- 0 * y + weighted.mean(y, w) + 1/8
+ matrix(colSums(y * w) / colSums(w) + 1/8,
+ n, ncoly, byrow = TRUE)
}
- init.lambda = rep(if (length( .ilambda )) .ilambda else 1.0,
- len = n)
+ variancey <- apply(y, 2, var)
+ init.la <- matrix(if (length( .ilambda )) .ilambda else
+ (init.mu^3) / (0.10 + variancey),
+ n, ncoly, byrow = TRUE)
- etastart = cbind(
- theta2eta(init.mu, link = .lmu, earg = .emu),
- theta2eta(init.lambda, link = .llambda, earg = .elambda))
+ etastart <- cbind(
+ theta2eta(init.mu, link = .lmu , earg = .emu ),
+ theta2eta(init.la, link = .llambda , earg = .elambda ))[,
+ interleave.VGAM(M, M = Musual)]
}
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
@@ -536,55 +706,82 @@ rinv.gaussian = function(n, mu, lambda) {
.imethod = imethod, .ilambda = ilambda ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], link = .lmu, earg = .emu)
+ eta2theta(eta[, c(TRUE, FALSE)], link = .lmu , earg = .emu )
}, list( .lmu = lmu, .emu = emu, .elambda = elambda ))),
last = eval(substitute(expression({
- misc$link = c(mu = .lmu, lambda = .llambda)
- misc$earg = list(mu = .emu, lambda = .elambda)
- misc$imethod = .imethod
- misc$shrinkage.init = .sinit
- misc$expected = TRUE
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .lmu , length = ncoly),
+ rep( .llambda , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .emu
+ misc$earg[[Musual*ii ]] <- .elambda
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$shrinkage.init <- .sinit
+ misc$expected <- TRUE
+ misc$multipleResponses <- FALSE
+ misc$parallel <- .parallel
+ misc$intercept.apply <- .intercept.apply
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda,
+ .parallel = parallel, .intercept.apply = intercept.apply,
.sinit = shrinkage.init,
.imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta[, 2], link = .llambda, earg = .elambda)
+ mymu <- eta2theta(eta[, c(TRUE, FALSE)],
+ link = .lmu , earg = .emu )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)],
+ link = .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dinv.gaussian(x=y, mu = mu, lambda = lambda, log = TRUE))
+ sum(c(w) * dinv.gaussian(x = y, mu = mymu,
+ lambda = lambda, log = TRUE))
}
- }, list( .llambda = llambda, .emu = emu,
- .elambda = elambda ))),
+ }, list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
vfamily = "inv.gaussianff",
deriv = eval(substitute(expression({
- mymu <- eta2theta(eta[, 1], link = .lmu, earg = .emu)
- lambda <- eta2theta(eta[, 2], link = .llambda, earg = .elambda)
-
- dmu.deta <- dtheta.deta(theta = mymu, link = .lmu, earg = .emu)
- dlambda.deta <- dtheta.deta(theta = lambda, link = .llambda,
- earg = .elambda)
-
- dl.dmu = lambda * (y - mymu) / mymu^3
- dl.dlambda <- 0.5 / lambda - (y-mymu)^2 / (2 * mymu^2 * y)
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dlambda * dlambda.deta)
+ Musual <- 2
+ mymu <- eta2theta(eta[, c(TRUE, FALSE)],
+ link = .lmu , earg = .emu )
+ lambda <- eta2theta(eta[, c(FALSE, TRUE)],
+ link = .llambda , earg = .elambda )
+
+ dmu.deta <- dtheta.deta(theta = mymu , link = .lmu , earg = .emu )
+ dlambda.deta <- dtheta.deta(theta = lambda, link = .llambda ,
+ earg = .elambda )
+
+ dl.dmu <- lambda * (y - mymu) / mymu^3
+ dl.dlambda <- 0.5 / lambda - (y - mymu)^2 / (2 * mymu^2 * y)
+ myderiv <- c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dlambda * dlambda.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))),
weight = eval(substitute(expression({
- d2l.dmu2 = lambda / mymu^3
+ ned2l.dmu2 <- lambda / mymu^3
+ ned2l.dlambda2 <- 0.5 / (lambda^2)
- d2l.dlambda2 = 0.5 / (lambda^2)
- wz <- cbind(dmu.deta^2 * d2l.dmu2,
- dlambda.deta^2 * d2l.dlambda2)
- c(w) * wz
+ wz <- cbind(dmu.deta^2 * ned2l.dmu2,
+ dlambda.deta^2 * ned2l.dlambda2)[,
+ interleave.VGAM(M, M = Musual)]
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .lmu = lmu, .llambda = llambda,
.emu = emu, .elambda = elambda ))))
}
@@ -592,16 +789,31 @@ rinv.gaussian = function(n, mu, lambda) {
- poissonff <- function(link = "loge", earg = list(),
- dispersion = 1, onedpar = FALSE,
- imu = NULL, imethod = 1,
- parallel = FALSE, zero = NULL)
+ poissonff <- function(link = "loge",
+ dispersion = 1, onedpar = FALSE,
+ imu = NULL, imethod = 1,
+ parallel = FALSE, zero = NULL,
+ bred = FALSE,
+ earg.link = FALSE)
{
+
+ if (bred)
+ stop("currently 'bred = TRUE' is not working")
+
estimated.dispersion <- dispersion==0
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+
+ if (earg.link) {
+ earg <- link
+ } else {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ }
+ link <- attr(earg, "function.name")
+
+
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -611,59 +823,73 @@ rinv.gaussian = function(n, mu, lambda) {
!is.Numeric(imu, positive = TRUE))
stop("bad input for argument 'imu'")
+
new("vglmff",
blurb = c("Poisson distribution\n\n",
"Link: ", namesof("mu", link, earg = earg), "\n",
"Variance: mu"),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
nz = y > 0
devi = -(y - mu)
devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(w * devi)
+ if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * c(w)) else
+ 2 * sum(c(w) * devi)
},
infos = eval(substitute(function(...) {
list(Musual = 1,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- y = as.matrix(y)
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
M = ncoly = ncol(y)
assign("CQO.FastAlgorithm", ( .link == "loge"), envir = VGAM:::VGAMenv)
dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
+ paste("E[", dn2, "]", sep = "")
} else {
- paste("mu", 1:M, sep = "")
+ paste("mu", 1:M, sep = "")
}
- predictors.names =
- namesof(if (M > 1) dn2 else "mu", .link, earg = .earg, short = TRUE)
+ predictors.names <-
+ namesof(if (M > 1) dn2 else "mu", .link ,
+ earg = .earg , short = TRUE)
if (!length(etastart)) {
- mu.init = pmax(y, 1/8)
- for(iii in 1:ncol(y)) {
- if ( .imethod == 2) {
- mu.init[,iii] = weighted.mean(y[,iii], w) + 1/8
- } else if ( .imethod == 3) {
- mu.init[,iii] = median(y[,iii]) + 1/8
- }
+ mu.init = pmax(y, 1/8)
+ for(iii in 1:ncol(y)) {
+ if ( .imethod == 2) {
+ mu.init[, iii] = weighted.mean(y[, iii], w[, iii]) + 1/8
+ } else if ( .imethod == 3) {
+ mu.init[, iii] = median(y[, iii]) + 1/8
}
- if (length( .imu ))
- mu.init = matrix( .imu, n, ncoly, byrow = TRUE)
- etastart <- theta2eta(mu.init, link = .link, earg = .earg)
+ }
+ if (length( .imu ))
+ mu.init = matrix( .imu , n, ncoly, byrow = TRUE)
+ etastart <- theta2eta(mu.init, link = .link , earg = .earg )
}
}), list( .link = link, .estimated.dispersion = estimated.dispersion,
- .imethod = imethod, .imu = imu, .earg = earg ))),
+ .imethod = imethod, .imu = imu, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
- mu
- }, list( .link = link, .earg = earg ))),
+ mu = eta2theta(eta, link = .link , earg = .earg )
+ mu
+ }, list( .link = link, .earg = earg))),
last = eval(substitute(expression({
if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
@@ -671,13 +897,13 @@ rinv.gaussian = function(n, mu, lambda) {
dpar <- .dispersion
if (!dpar) {
temp87 = (y-mu)^2 *
- wz / (dtheta.deta(mu, link = .link, earg = .earg)^2) # w cancel
+ wz / (dtheta.deta(mu, link = .link , earg = .earg )^2) # w cancel
if (M > 1 && ! .onedpar) {
dpar = rep(as.numeric(NA), len = M)
temp87 = cbind(temp87)
nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
for(ii in 1:M)
- dpar[ii] = sum(temp87[,ii]) / (nrow.mu - ncol(x))
+ dpar[ii] = sum(temp87[, ii]) / (nrow.mu - ncol(x))
if (is.matrix(y) && length(dimnames(y)[[2]])==length(dpar))
names(dpar) = dimnames(y)[[2]]
} else {
@@ -687,10 +913,14 @@ rinv.gaussian = function(n, mu, lambda) {
misc$dispersion <- dpar
misc$default.dispersion <- 1
misc$estimated.dispersion <- .estimated.dispersion
+
misc$expected = TRUE
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
misc$imethod = .imethod
+ misc$multipleResponses <- TRUE
+
+
+ misc$link = rep( .link , length = M)
+ names(misc$link) = if (M > 1) dn2 else "mu"
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
@@ -698,435 +928,329 @@ rinv.gaussian = function(n, mu, lambda) {
misc$earg[[ii]] = .earg
}), list( .dispersion = dispersion, .imethod=imethod,
.estimated.dispersion = estimated.dispersion,
- .onedpar = onedpar, .link = link, .earg = earg ))),
+ .onedpar = onedpar, .link = link, .earg = earg))),
linkfun = eval(substitute( function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
+ theta2eta(mu, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w*(y/mu - 1) else {
- sum(w * dpois(x=y, lambda=mu, log = TRUE))
+ if (residuals) w * (y / mu - 1) else {
+ sum(w * dpois(x = y, lambda = mu, log = TRUE))
}
},
vfamily = "poissonff",
deriv = eval(substitute(expression({
+ answer <-
if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- w * (y - mu)
+ c(w) * (y - mu)
} else {
lambda <- mu
- dl.dlambda <- (y-lambda) / lambda
+ dl.dlambda <- (y - lambda) / lambda
dlambda.deta <- dtheta.deta(theta = lambda,
- link = .link, earg = .earg)
- w * dl.dlambda * dlambda.deta
+ link = .link , earg = .earg )
+ c(w) * dl.dlambda * dlambda.deta
}
- }), list( .link = link, .earg = earg ))),
+
+
+ if ( .bred ) {
+ adjustment <- Hvector <-
+ hatvaluesbasic(X_vlm = X_vlm_save,
+ diagWm = c(w) * mu)
+ answer + (c(w) * mu) * Hvector / 2
+ } else {
+ answer
+ }
+ }), list( .link = link, .earg = earg, .bred = bred))),
weight = eval(substitute(expression({
- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- tmp600 = mu
- tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
- w * tmp600
- } else {
- d2l.dlambda2 = 1 / lambda
- d2lambda.deta2=d2theta.deta2(theta = lambda,link= .link,earg = .earg)
- w * dlambda.deta^2 * d2l.dlambda2
+ if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
+ tmp600 = mu
+ tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
+ c(w) * tmp600
+ } else {
+ d2l.dlambda2 = 1 / lambda
+ d2lambda.deta2 = d2theta.deta2(theta = lambda,
+ link = .link , earg = .earg )
+ c(w) * dlambda.deta^2 * d2l.dlambda2
}
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
- quasibinomialff = function(link = "logit", mv = FALSE, onedpar = !mv,
- parallel = FALSE, zero = NULL) {
- dispersion = 0 # Estimated; this is the only difference with binomialff()
- ans =
- binomialff(link = link, dispersion=dispersion, mv=mv, onedpar=onedpar,
- parallel=parallel, zero=zero)
- ans at vfamily = "quasibinomialff"
- ans
-}
-
- quasipoissonff = function(link = "loge", onedpar = FALSE, parallel = FALSE,
- zero = NULL) {
- dispersion = 0 # Estimated; this is the only difference with poissonff()
- ans =
- poissonff(link = link, dispersion=dispersion, onedpar=onedpar,
- parallel=parallel, zero=zero)
- ans at vfamily = "quasipoissonff"
- ans
-}
+ quasibinomialff <- function(
+ link = "logit",
+ mv = FALSE, onedpar = !mv,
+ parallel = FALSE, zero = NULL) {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
-poissonqn.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight=save.weight)
+ dispersion <- 0 # Estimated; this is the only difference with binomialff()
+ ans <- binomialff(link = earg, earg.link = TRUE,
+ dispersion = dispersion,
+ mv = mv, onedpar = onedpar,
+ parallel = parallel, zero = zero)
+ ans at vfamily <- "quasibinomialff"
+ ans
}
- poissonqn = function(link = "loge", earg = list(),
- dispersion = 1, onedpar = FALSE,
- parallel = FALSE, zero = NULL,
- wwts=c("expected","observed","qn"))
-{
- estimated.dispersion <- dispersion==0
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (mode(wwts) != "character" && mode(wwts) != "name")
- wwts <- as.character(substitute(wwts))
- wwts <- match.arg(wwts, c("expected","observed","qn"))[1]
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Poisson distribution\n\n",
- "Link: ", namesof("mu", link, earg = earg), "\n",
- "Variance: mu"),
- constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- nz = y > 0
- devi = -(y - mu)
- devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(w * devi)
- },
- initialize = eval(substitute(expression({
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- paste("mu", 1:M, sep = "")
- }
- predictors.names = namesof(if (M > 1) dn2 else "mu", .link,
- earg = .earg, short = TRUE)
- mu = pmax(y, 0.167) # y + 0.167 * (y == 0)
- if (!length(etastart))
- etastart <- theta2eta(mu, link = .link, earg = .earg)
- }), list( .link = link, .estimated.dispersion = estimated.dispersion,
- .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, link = .link, earg = .earg)
- }, list( .link = link,
- .earg = earg ))),
- last = eval(substitute(expression({
- dpar <- .dispersion
- if (!dpar) {
- temp87 = (y-mu)^2 *
- wz / (dtheta.deta(mu, link = .link, earg = .earg)^2)
- if (M > 1 && ! .onedpar) {
- dpar = rep(as.numeric(NA), len = M)
- temp87 = cbind(temp87)
- nrow.mu = if (is.matrix(mu)) nrow(mu) else length(mu)
- for(i in 1:M)
- dpar[i] = sum(temp87[,i]) / (nrow.mu - ncol(x))
- if (is.matrix(y) &&
- length(dimnames(y)[[2]]) == length(dpar))
- names(dpar) = dimnames(y)[[2]]
- } else
- dpar = sum(temp87) / (length(mu) - ncol(x))
- }
- misc$BFGS = TRUE
- misc$dispersion <- dpar
- misc$default.dispersion <- 1
- misc$estimated.dispersion <- .estimated.dispersion
- misc$expected = FALSE
- misc$link = rep( .link, length = M)
- names(misc$link) = if (M > 1) dn2 else "mu"
+ quasipoissonff <- function(link = "loge", onedpar = FALSE,
+ parallel = FALSE, zero = NULL) {
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M)
- misc$earg[[ii]] = .earg
- }), list( .dispersion = dispersion,
- .earg = earg,
- .estimated.dispersion = estimated.dispersion,
- .onedpar = onedpar, .link = link ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg = .earg)
- }, list( .link = link,
- .earg = earg ))),
- loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- if (residuals) w*(y/mu - 1) else {
- sum(w * dpois(x=y, lambda=mu, log = TRUE))
- }
- },
- vfamily = "poissonqn",
- deriv = eval(substitute(expression({
- if (iter == 1) {
- etanew = eta
- } else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
- }
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- derivnew =
- if ( .link == "loge" && (any(mu < .Machine$double.eps))) {
- w * (y - mu)
- } else {
- lambda <- mu
- dl.dlambda <- (y-lambda) / lambda
- dlambda.deta <- dtheta.deta(theta = lambda,
- link = .link, earg = .earg)
- w * dl.dlambda * dlambda.deta
- }
- derivnew
- }), list( .link = link,
- .earg = earg ))),
- weight = eval(substitute(expression({
- if ( .wwts == "qn") {
- if (iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
- } else {
- wzold = wznew
- wznew = qnupdate(w = w, wzold=wzold,
- dderiv=(derivold-derivnew),
- deta = etanew-etaold, M=M,
- trace=trace) # weights incorporated in args
- }
- } else if ( .wwts == "expected") {
- wznew = if ( .link == "loge") {
- tmp600 = mu
- tmp600[tmp600 < .Machine$double.eps] = .Machine$double.eps
- w * tmp600
- } else {
- d2l.dlambda2 = 1 / lambda
- w * dlambda.deta^2 * d2l.dlambda2
- }
- } else {
- wznew = if ( .link == "loge") {
- tmp600 = y
- tmp600[y < .Machine$double.eps] = sqrt(.Machine$double.eps)
- w * tmp600
- } else {
- stop("this is not programmed in yet")
- }
- }
- wznew
- }), list( .wwts = wwts, .link = link,
- .earg = earg ))))
+
+
+ dispersion <- 0 # Estimated; this is the only difference with poissonff()
+ ans <- poissonff(link = earg, earg.link = TRUE,
+ dispersion = dispersion, onedpar = onedpar,
+ parallel = parallel, zero = zero)
+ ans at vfamily <- "quasipoissonff"
+ ans
}
- dexppoisson = function(lmean = "loge", emean = list(),
- ldispersion = "logit", edispersion = list(),
- idispersion=0.8,
- zero = NULL)
+ dexppoisson <- function(lmean = "loge",
+ ldispersion = "logit",
+ idispersion = 0.8,
+ zero = NULL)
{
- if (mode(lmean)!= "character" && mode(lmean)!= "name")
- lmean = as.character(substitute(lmean))
- if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
- ldispersion = as.character(substitute(ldispersion))
+
if (!is.Numeric(idispersion, positive = TRUE))
- stop("bad input for 'idispersion'")
- if (!is.list(emean)) emean = list()
- if (!is.list(edispersion)) edispersion = list()
+ stop("bad input for 'idispersion'")
- new("vglmff",
- blurb = c("Double exponential Poisson distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean), ", ",
- namesof("dispersion", lmean, earg = edispersion), "\n",
- "Mean: ", "mean\n",
- "Variance: mean / dispersion"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- M = if (is.matrix(y)) ncol(y) else 1
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- "mu"
- }
- predictors.names =
- c(namesof(dn2, link = .lmean, earg = .emean, short = TRUE),
- namesof("dispersion", link = .ldispersion,
- earg = .edispersion, short = TRUE))
- init.mu = pmax(y, 1/8)
- if (!length(etastart))
- etastart = cbind(theta2eta(init.mu,
- link = .lmean ,
- earg = .emean ),
- theta2eta(rep( .idispersion, length.out = n),
- link = .ldispersion ,
- earg = .edispersion))
- }), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion,
- .idispersion = idispersion ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], link = .lmean, earg = .emean)
- }, list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c("mean"= .lmean, "dispersion"= .ldispersion)
- misc$earg = list(mean= .emean, dispersion= .edispersion)
- }), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], link = .lmean,
- earg = .emean )
- Disper = eta2theta(eta[, 2], link = .ldispersion,
- earg = .edispersion )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * (0.5*log(Disper) +
- Disper*(y-lambda) + Disper*y*log(lambda)))
- }
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ ldisp <- as.list(substitute(ldispersion))
+ edisp <- link2list(ldisp)
+ ldisp <- attr(edisp, "function.name")
+
+ idisp <- idispersion
+
+
+ new("vglmff",
+ blurb = c("Double exponential Poisson distribution\n\n",
+ "Link: ",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", ldisp, earg = edisp), "\n",
+ "Mean: ", "mean\n",
+ "Variance: mean / dispersion"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ lmean = .lmean ,
+ zero = .zero )
+ }, list( .lmean = lmean ))),
+
+
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+ M = if (is.matrix(y)) ncol(y) else 1
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ "mu"
+ }
+ predictors.names <-
+ c(namesof(dn2, link = .lmean, earg = .emean, short = TRUE),
+ namesof("dispersion", link = .ldisp, earg = .edisp, short = TRUE))
+
+ init.mu = pmax(y, 1/8)
+ tmp2 <- rep( .idisp , length.out = n)
+
+ if (!length(etastart))
+ etastart <-
+ cbind(theta2eta(init.mu, link = .lmean , earg = .emean ),
+ theta2eta(tmp2, link = .ldisp , earg = .edisp ))
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp,
+ .idisp = idisp ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ }, list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
+ last = eval(substitute(expression({
+ misc$expected <- TRUE
+ misc$link <- c(mean = .lmean , dispersion = .ldisp )
+ misc$earg <- list(mean = .emean , dispersion = .edisp )
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], link = .lmean,
+ earg = .emean )
+ Disper = eta2theta(eta[, 2], link = .ldisp,
+ earg = .edisp )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(w * (0.5*log(Disper) +
+ Disper*(y-lambda) + Disper*y*log(lambda)))
+ }
}, list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
+ .ldisp = ldisp, .edisp = edisp ))),
vfamily = "dexppoisson",
deriv = eval(substitute(expression({
lambda = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldispersion,
- earg = .edispersion)
+ Disper = eta2theta(eta[, 2], link = .ldisp,
+ earg = .edisp)
dl.dlambda = Disper * (y / lambda - 1)
dl.dDisper = y * log(lambda) + y - lambda + 0.5 / Disper
dlambda.deta = dtheta.deta(theta = lambda, link = .lmean,
earg = .emean)
- dDisper.deta = dtheta.deta(theta = Disper, link = .ldispersion,
- earg = .edispersion)
+ dDisper.deta = dtheta.deta(theta = Disper, link = .ldisp,
+ earg = .edisp)
c(w) * cbind(dl.dlambda * dlambda.deta,
dl.dDisper * dDisper.deta)
}), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
+ .ldisp = ldisp, .edisp = edisp ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow=n, ncol=2) # diagonal
+ wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
usethis.lambda = pmax(lambda, .Machine$double.eps / 10000)
- wz[,iam(1, 1,M)] = (Disper / usethis.lambda) * dlambda.deta^2
- wz[,iam(2, 2,M)] = (0.5 / Disper^2) * dDisper.deta^2
+ wz[, iam(1, 1, M)] = (Disper / usethis.lambda) * dlambda.deta^2
+ wz[, iam(2, 2, M)] = (0.5 / Disper^2) * dDisper.deta^2
c(w) * wz
}), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion,
- .edispersion = edispersion ))))
+ .ldisp = ldisp,
+ .edisp = edisp ))))
}
- dexpbinomial = function(lmean = "logit", ldispersion = "logit",
- emean = list(), edispersion = list(),
- idispersion=0.25,
- zero=2)
-{
- if (mode(lmean)!= "character" && mode(lmean)!= "name")
- lmean = as.character(substitute(lmean))
- if (mode(ldispersion)!= "character" && mode(ldispersion)!= "name")
- ldispersion = as.character(substitute(ldispersion))
+ dexpbinomial <- function(lmean = "logit", ldispersion = "logit",
+ idispersion = 0.25, zero = 2) {
+
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ ldisp <- as.list(substitute(ldispersion))
+ edisp <- link2list(ldisp)
+ ldisp <- attr(edisp, "function.name")
+ idisp <- idispersion
+
+
if (!is.Numeric(idispersion, positive = TRUE))
stop("bad input for 'idispersion'")
- if (!is.list(emean)) emean = list()
- if (!is.list(edispersion)) edispersion = list()
- new("vglmff",
- blurb = c("Double Exponential Binomial distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean), ", ",
- namesof("dispersion", lmean, earg = edispersion), "\n",
- "Mean: ", "mean\n"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w = w
+ new("vglmff",
+ blurb = c("Double Exponential Binomial distribution\n\n",
+ "Link: ",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("dispersion", ldisp, earg = edisp), "\n",
+ "Mean: ", "mean\n"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (!all(w == 1))
+ extra$orig.w = w
- if (ncol(cbind(w)) != 1)
- stop("'weights' must be a vector or a one-column matrix")
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
+ if (ncol(cbind(w)) != 1)
+ stop("'weights' must be a vector or a one-column matrix")
- if (NCOL(y) == 1) {
+ NCOL = function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+ if (NCOL(y) == 1) {
- if (is.factor(y)) y = (y != levels(y)[1])
- nvec = rep(1, n)
- y[w == 0] <- 0
- if (!all(y == 0 || y == 1))
- stop("response values 'y' must be 0 or 1")
- init.mu =
- mustart = (0.5 + w * y) / (1 + w)
+ if (is.factor(y)) y = (y != levels(y)[1])
+ nvec = rep(1, n)
+ y[w == 0] <- 0
+ if (!all(y == 0 || y == 1))
+ stop("response values 'y' must be 0 or 1")
+ init.mu =
+ mustart = (0.5 + w * y) / (1 + w)
- no.successes = y
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (min(y) < 0)
- stop("Negative data not allowed!")
- if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
- init.mu =
- mustart = (0.5 + nvec * y) / (1 + nvec)
- } else
- stop("for the dexpbinomial family, response 'y' must be a ",
- "vector of 0 and 1's\n",
+
+ no.successes = y
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
+ stop("Number of successes must be integer-valued")
+ } else if (NCOL(y) == 2) {
+ if (min(y) < 0)
+ stop("Negative data not allowed!")
+ if (any(abs(y - round(y)) > 1.0e-8))
+ stop("Count data must be integer-valued")
+ y = round(y)
+ nvec = y[, 1] + y[, 2]
+ y = ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w = w * nvec
+ init.mu =
+ mustart = (0.5 + nvec * y) / (1 + nvec)
+ } else
+ stop("for the dexpbinomial family, response 'y' must be a ",
+ "vector of 0 and 1's\n",
"or a factor (first level = fail, ",
"other levels = success),\n",
"or a 2-column matrix where col 1 is the no. of ",
"successes and col 2 is the no. of failures")
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
- paste("E[", dn2, "]", sep = "")
- } else {
- "mu"
- }
- predictors.names =
- c(namesof(dn2, link = .lmean, earg = .emean, short = TRUE),
- namesof("dispersion", link = .ldispersion,
- earg = .edispersion, short = TRUE))
- if (!length(etastart))
- etastart = cbind(theta2eta(init.mu,
- link = .lmean,
- earg = .emean),
- theta2eta(rep( .idispersion, len = n),
- link = .ldispersion,
- earg = .edispersion))
- }), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion,
- .idispersion = idispersion ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], link = .lmean, earg = .emean)
- }, list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c("mean" = .lmean, "dispersion" = .ldispersion)
- misc$earg = list( mean = .emean, dispersion = .edispersion)
- }), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
+ dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 = if (length(dn2)) {
+ paste("E[", dn2, "]", sep = "")
+ } else {
+ "mu"
+ }
+
+ predictors.names <-
+ c(namesof(dn2, .lmean, earg = .emean, short = TRUE),
+ namesof("dispersion", .ldisp, earg = .edisp, short = TRUE))
+
+ tmp2 <- rep( .idisp , len = n)
+
+ if (!length(etastart))
+ etastart = cbind(theta2eta(init.mu, .lmean, earg = .emean),
+ theta2eta(tmp2, .ldisp, earg = .edisp))
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp,
+ .idisp = idisp ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ }, list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
+ last = eval(substitute(expression({
+ misc$expected <- TRUE
+ misc$link <- c("mean" = .lmean, "dispersion" = .ldisp)
+ misc$earg <- list( mean = .emean, dispersion = .edisp)
+ }), list( .lmean = lmean, .emean = emean,
+ .ldisp = ldisp, .edisp = edisp ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldispersion,
- earg = .edispersion)
+ Disper = eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
@@ -1139,12 +1263,11 @@ poissonqn.control <- function(save.weight = TRUE, ...)
temp1 * (1-Disper) + temp2 * (1 - Disper)))
}
}, list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
+ .ldisp = ldisp, .edisp = edisp ))),
vfamily = "dexpbinomial",
deriv = eval(substitute(expression({
- prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
- Disper = eta2theta(eta[, 2], link = .ldispersion,
- earg = .edispersion)
+ prob = eta2theta(eta[, 1], link = .lmean, earg = .emean)
+ Disper = eta2theta(eta[, 2], link = .ldisp, earg = .edisp)
temp1 = y * log(ifelse(y > 0, y, 1)) # y*log(y)
temp2 = (1.0-y) * log1p(ifelse(y < 1, -y, 0)) # (1-y)*log(1-y)
temp3 = prob * (1.0-prob)
@@ -1154,33 +1277,34 @@ poissonqn.control <- function(save.weight = TRUE, ...)
dl.dDisper = 0.5 / Disper + w * (y * log(prob) +
(1-y)*log1p(-prob) - temp1 - temp2)
- dprob.deta = dtheta.deta(theta=prob, link = .lmean, earg = .emean)
- dDisper.deta = dtheta.deta(theta = Disper, link = .ldispersion,
- earg = .edispersion)
+ dprob.deta = dtheta.deta(theta = prob, .lmean, earg = .emean)
+ dDisper.deta = dtheta.deta(theta = Disper, .ldisp, earg = .edisp)
cbind(dl.dprob * dprob.deta,
dl.dDisper * dDisper.deta)
}), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))),
+ .ldisp = ldisp, .edisp = edisp ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow=n, ncol=2) # diagonal
- wz[,iam(1, 1,M)] = w * (Disper / temp3) * dprob.deta^2
- wz[,iam(2, 2,M)] = (0.5 / Disper^2) * dDisper.deta^2
+ wz = matrix(as.numeric(NA), nrow = n, ncol = 2) # diagonal
+ wz[, iam(1, 1, M)] = w * (Disper / temp3) * dprob.deta^2
+ wz[, iam(2, 2, M)] = (0.5 / Disper^2) * dDisper.deta^2
wz
}), list( .lmean = lmean, .emean = emean,
- .ldispersion = ldispersion, .edispersion = edispersion ))))
+ .ldisp = ldisp, .edisp = edisp ))))
}
- mbinomial = function(mvar = NULL, link = "logit", earg = list(),
+ mbinomial <- function(mvar = NULL, link = "logit",
parallel = TRUE,
smallno = .Machine$double.eps^(3/4))
{
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
if (!is.Numeric(smallno, positive = TRUE,
allowable.length = 1) ||
smallno > 1e-4)
@@ -1199,11 +1323,11 @@ poissonqn.control <- function(save.weight = TRUE, ...)
blurb = c("Matched binomial model (intercepts fitted)\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints,
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
intercept.apply = TRUE)
constraints[[extra$mvar]] <- diag(M)
- specialCM = list(a = vector("list", M-1))
+ specialCM <- list(a = vector("list", M-1))
for(ii in 1:(M-1)) {
specialCM[[1]][[ii]] =
(constraints[[extra$mvar]])[, 1+ii,drop = FALSE]
@@ -1252,15 +1376,16 @@ poissonqn.control <- function(save.weight = TRUE, ...)
extra$mvar = mvar
extra$index9 = temp9
- predictors.names = namesof("mu", .link, earg = .earg, short = TRUE)
- predictors.names = rep(predictors.names, len = M)
+ predictors.names <-
+ namesof("mu", .link , earg = .earg , short = TRUE)
+ predictors.names <- rep(predictors.names, len = M)
}), list( .link = link, .earg = earg, .mvar = mvar ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
+ mu = eta2theta(eta, link = .link , earg = .earg )
mu[cbind(1:extra$n, extra$index9)]
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = rep( .link, length = M)
+ misc$link = rep( .link , length = M)
names(misc$link) = if (M > 1) paste("mu(matched set ",
1:M, ")", sep = "") else "mu"
misc$earg = vector("list", M)
@@ -1268,11 +1393,11 @@ poissonqn.control <- function(save.weight = TRUE, ...)
for(ii in 1:M) misc$earg[[ii]] = .earg
misc$expected = TRUE
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- temp = theta2eta(mu, .link, earg = .earg )
+ temp = theta2eta(mu, .link , earg = .earg )
matrix(temp, extra$n, extra$M)
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
@@ -1304,12 +1429,12 @@ poissonqn.control <- function(save.weight = TRUE, ...)
mu.use[mu.use > 1 - smallno] = 1 - smallno
-w * (y - mu) * log1p(-mu.use) / mu.use
} else
- w * dtheta.deta(mu, link = .link, earg = .earg ) *
+ w * dtheta.deta(mu, link = .link , earg = .earg ) *
(y/mu - 1)/(1-mu)
result = matrix(0, n, M)
result[cbind(1:n, extra$index9)] = answer
result
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
tmp100 = mu*(1-mu)
answer = if ( .link == "logit") {
@@ -1317,8 +1442,8 @@ poissonqn.control <- function(save.weight = TRUE, ...)
} else if ( .link == "cloglog") {
cbind(w * (1-mu.use) * (log1p(-mu.use))^2 / mu.use )
} else {
- cbind(w * dtheta.deta(mu, link = .link,
- earg = .earg)^2 / tmp100)
+ cbind(w * dtheta.deta(mu, link = .link ,
+ earg = .earg )^2 / tmp100)
}
result = matrix( .smallno, n, M)
@@ -1330,7 +1455,7 @@ poissonqn.control <- function(save.weight = TRUE, ...)
-mypool = function(x, index) {
+mypool <- function(x, index) {
answer = x
uindex = unique(index)
for(ii in uindex) {
@@ -1341,16 +1466,25 @@ mypool = function(x, index) {
}
+
+
+
+
+
+
+
if (FALSE)
- mbino = function()
+ mbino <- function()
{
- link = "logit"
- earg = list()
- parallel = TRUE
+ link <- "logit"
+ earg <- list()
+ parallel <- TRUE
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
if (is.logical(parallel) && !parallel)
stop("'parallel' must be TRUE")
@@ -1359,7 +1493,7 @@ mypool = function(x, index) {
blurb = c("Matched binomial model (intercepts not fitted)\n\n",
"Link: ", namesof("mu[,j]", link, earg = earg)),
constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(1,M, 1), x, .parallel, constraints,
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
intercept.apply = FALSE)
}), list( .parallel = parallel ))),
initialize = eval(substitute(expression({
@@ -1390,7 +1524,7 @@ mypool = function(x, index) {
stop("Response not of the right form")
if (!length(etastart))
- etastart <- theta2eta(mustart, link= "logit", earg = list())
+ etastart <- theta2eta(mustart, link = "logit", earg = list())
temp1 = attr(x, "assign")
mvar = extra$mvar
@@ -1408,7 +1542,8 @@ mypool = function(x, index) {
extra$M = M
extra$rlex = xrle
extra$index9 = temp9
- predictors.names = namesof("mu", .link, earg = .earg, short = TRUE)
+ predictors.names <-
+ namesof("mu", .link , earg = .earg , short = TRUE)
}), list( .link = link, .earg = earg, .mvar = mvar ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
denominator = exp(eta)
@@ -1416,10 +1551,10 @@ mypool = function(x, index) {
numerator / denominator
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(mu = .link)
- misc$earg = list( mu = .earg )
- misc$expected = TRUE
- }), list( .link = link, .earg = earg ))),
+ misc$link <- c(mu = .link )
+ misc$earg <- list( mu = .earg )
+ misc$expected <- TRUE
+ }), list( .link = link, .earg = earg))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) w*(y/mu - (1-y)/(1-mu)) else {
@@ -1433,7 +1568,7 @@ mypool = function(x, index) {
w * (y - mu)
} else stop("can only handle the logit link")
answer
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
tmp100 = mu*(1-mu)
answer = if ( .link == "logit") {
@@ -1456,8 +1591,7 @@ mypool = function(x, index) {
- augbinomial = function(link = "logit", earg = list(),
- mv = FALSE,
+ augbinomial <- function(link = "logit", mv = FALSE,
parallel = TRUE)
{
@@ -1467,9 +1601,10 @@ mypool = function(x, index) {
!parallel)
warning("Argument 'parallel' should be assigned 'TRUE' only")
- if (mode(link )!= "character" && mode(link )!= "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
new("vglmff",
blurb = if (mv) c("Augmented multivariate binomial model\n\n",
@@ -1508,13 +1643,13 @@ mypool = function(x, index) {
} else {
paste("mu", 1:M, sep = "")
}
- predictors.names =
+ predictors.names <-
c(namesof(if (M > 1) dn2 else
- "mu.1", .link, earg = .earg, short = TRUE),
+ "mu.1", .link , earg = .earg , short = TRUE),
namesof(if (M > 1) dn2 else
- "mu.2", .link, earg = .earg, short = TRUE))
+ "mu.2", .link , earg = .earg , short = TRUE))
NOS = M / Musual
- predictors.names =
+ predictors.names <-
predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
@@ -1567,21 +1702,21 @@ mypool = function(x, index) {
"or a 2-column matrix where col 1 is the no. of ",
"successes and col 2 is the no. of failures")
}
- predictors.names =
- c(namesof("mu.1", .link, earg = .earg, short = TRUE),
- namesof("mu.2", .link, earg = .earg, short = TRUE))
+ predictors.names <-
+ c(namesof("mu.1", .link , earg = .earg , short = TRUE),
+ namesof("mu.2", .link , earg = .earg , short = TRUE))
}
- }), list( .link = link, .mv = mv, .earg = earg ))),
+ }), list( .link = link, .mv = mv, .earg = earg))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Mdiv2 = ncol(eta) / 2
index1 = 2*(1:Mdiv2) - 1
mu = eta2theta(eta[, index1],
- link = .link, earg = .earg)
+ link = .link , earg = .earg )
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
misc$mv = .mv
- misc$link = rep( .link, length = M)
+ misc$link = rep( .link , length = M)
names(misc$link) = if (M > 1) dn2 else "mu"
misc$earg = vector("list", M)
@@ -1593,9 +1728,9 @@ mypool = function(x, index) {
}), list( .link = link, .mv = mv, .earg = earg,
.parallel = parallel ))),
linkfun = eval(substitute(function(mu, extra = NULL) {
- usualanswer = theta2eta(mu, .link, earg = .earg )
+ usualanswer = theta2eta(mu, .link , earg = .earg )
kronecker(usualanswer, matrix(1, 1, 2))
- }, list( .link = link, .earg = earg ))),
+ }, list( .link = link, .earg = earg))),
loglikelihood =
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) w * (y / mu - (1-y) / (1-mu)) else {
@@ -1629,7 +1764,7 @@ mypool = function(x, index) {
y * (1 - mu)
} else {
stop("this is not programmed in yet")
- dtheta.deta(mu, link = .link, earg = .earg ) *
+ dtheta.deta(mu, link = .link , earg = .earg ) *
(y / mu - 1.0) / (1.0 - mu)
}
deriv2 = Konst1 * w *
@@ -1637,7 +1772,7 @@ mypool = function(x, index) {
-(1 - y) * mu
} else {
stop("this is not programmed in yet")
- dtheta.deta(mu, link = .link, earg = .earg ) *
+ dtheta.deta(mu, link = .link , earg = .earg ) *
(y / mu - 1.0) / (1.0 - mu)
}
@@ -1645,14 +1780,14 @@ mypool = function(x, index) {
deriv2))[, interleave.VGAM(Musual * NOS,
M = Musual)]
myderiv
- }), list( .link = link, .earg = earg ))),
+ }), list( .link = link, .earg = earg))),
weight = eval(substitute(expression({
tmp100 = mu * (1.0 - mu)
tmp200 = if ( .link == "logit") {
cbind(w * tmp100)
} else {
- cbind(w * dtheta.deta(mu, link = .link, earg = .earg)^2 / tmp100)
+ cbind(w * dtheta.deta(mu, link = .link , earg = .earg )^2 / tmp100)
}
wk_wt1 = (Konst1^2) * tmp200 * (1 - mu)
@@ -1664,7 +1799,7 @@ mypool = function(x, index) {
my_wk_wt = cbind(wk_wt1, wk_wt2)
my_wk_wt = my_wk_wt[, interleave.VGAM(Musual * NOS, M = Musual)]
my_wk_wt
- }), list( .link = link, .earg = earg ))))
+ }), list( .link = link, .earg = earg))))
}
diff --git a/R/family.loglin.R b/R/family.loglin.R
index 0e25e65..028114e 100644
--- a/R/family.loglin.R
+++ b/R/family.loglin.R
@@ -8,236 +8,273 @@
loglinb2 <- function(exchangeable = FALSE, zero = NULL)
{
- new("vglmff",
- blurb = c("Log-linear model for binary data\n\n",
- "Links: ",
- "Identity: u1, u2, u12",
- "\n"),
- constraints = eval(substitute(expression({
- constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
- }), list( .exchangeable = exchangeable, .zero = zero ))),
- initialize = expression({
-
- y <- as.matrix(y)
- predictors.names <- c("u1", "u2", "u12")
- if (ncol(y) != 2)
- stop("ncol(y) must be = 2")
-
- if (length(mustart) + length(etastart) == 0) {
- mustart <- matrix(as.numeric(NA), nrow(y), 4)
- mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2]), w)
- mustart[,2] <- weighted.mean((1-y[,1])*y[,2], w)
- mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
- mustart[,4] <- weighted.mean(y[,1]*y[,2], w)
- if (any(mustart == 0))
- stop("some combinations of the response not realized")
- }
- }),
- linkinv = function(eta, extra = NULL) {
- u1 <- eta[,1]
- u2 <- eta[,2]
- u12 <- eta[,3]
- denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
- cbind("00" = 1/denom,
- "01" = exp(u2) / denom,
- "10" = exp(u1) / denom,
- "11" = exp(u1+u2+u12) / denom)
- },
- last = expression({
- misc$link = c("u1" = "identity", "u2" = "identity", "u12" = "identity")
- misc$earg = list(u1 = list(), u2 = list(), u12 = list())
- }),
- linkfun = function(mu, extra = NULL) {
- u0 <- log(mu[,1])
- u2 <- log(mu[,2]) - u0
- u1 <- log(mu[,3]) - u0
- u12 <- log(mu[,4]) - u0 - u1 - u2
- cbind(u1, u2, u12)
- },
- loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- u1 <- eta[,1]
- u2 <- eta[,2]
- u12 <- eta[,3]
- denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
- u0 <- -log(denom)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else
- sum(w*(u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2]))
- },
- vfamily = c("loglinb2"),
- deriv = expression({
- u1 <- eta[,1]
- u2 <- eta[,2]
- u12 <- eta[,3]
- denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
- du0.du1 <- -(exp(u1) + exp(u1 + u2 + u12)) / denom
- du0.du2 <- -(exp(u2) + exp(u1 + u2 + u12)) / denom
- du0.du12 <- -exp(u1 + u2 + u12) / denom
- c(w) * cbind(du0.du1 + y[,1],
- du0.du2 + y[,2],
- du0.du12 + y[,1] * y[,2])
- }),
- weight = expression({
- d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2
- d2u0.du22 <- -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2
- d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2
- d2u0.du1u2 <- -(exp(u1 + u2 + u12) - exp(u1 + u2)) / denom^2
- d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2
- d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2
-
- wz <- matrix(as.numeric(NA), n, dimm(M))
- wz[,iam(1,1,M)] <- -d2u0.du1.2
- wz[,iam(2,2,M)] <- -d2u0.du22
- wz[,iam(3,3,M)] <- -d2u0.du122
- wz[,iam(1,2,M)] <- -d2u0.du1u2
- wz[,iam(1,3,M)] <- -d2u0.du1u3
- wz[,iam(2,3,M)] <- -d2u0.du2u3
- c(w) * wz
- }))
+ new("vglmff",
+ blurb = c("Log-linear model for binary data\n\n",
+ "Links: ",
+ "Identity: u1, u2, u12",
+ "\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(c(1,1,0, 0,0,1), 3, 2), x,
+ .exchangeable, constraints,
+ intercept.apply = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+ initialize = expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 2,
+ out.wy = TRUE,
+ colsyperw = 2,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ if (ncol(y) != 2)
+ stop("ncol(y) must be = 2")
+
+ predictors.names <- c("u1", "u2", "u12")
+
+ if (length(mustart) + length(etastart) == 0) {
+ mustart <- matrix(as.numeric(NA), nrow(y), 4)
+ mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2]), w)
+ mustart[,2] <- weighted.mean((1-y[,1])*y[,2], w)
+ mustart[,3] <- weighted.mean(y[,1]*(1-y[,2]), w)
+ mustart[,4] <- weighted.mean(y[,1]*y[,2], w)
+ if (any(mustart == 0))
+ stop("some combinations of the response not realized")
+ }
+ }),
+ linkinv = function(eta, extra = NULL) {
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ cbind("00" = 1/denom,
+ "01" = exp(u2) / denom,
+ "10" = exp(u1) / denom,
+ "11" = exp(u1+u2+u12) / denom)
+ },
+ last = expression({
+ misc$link = c("u1" = "identity", "u2" = "identity", "u12" = "identity")
+ misc$earg = list(u1 = list(), u2 = list(), u12 = list())
+
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+ }),
+ linkfun = function(mu, extra = NULL) {
+ u0 <- log(mu[,1])
+ u2 <- log(mu[,2]) - u0
+ u1 <- log(mu[,3]) - u0
+ u12 <- log(mu[,4]) - u0 - u1 - u2
+ cbind(u1, u2, u12)
+ },
+ loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ u0 <- -log(denom)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
+ sum(c(w) *(u0 + u1*y[,1] + u2*y[,2] + u12*y[,1]*y[,2]))
+ },
+ vfamily = c("loglinb2"),
+ deriv = expression({
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u12 <- eta[,3]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u1 + u2 + u12)
+ du0.du1 <- -(exp(u1) + exp(u1 + u2 + u12)) / denom
+ du0.du2 <- -(exp(u2) + exp(u1 + u2 + u12)) / denom
+ du0.du12 <- -exp(u1 + u2 + u12) / denom
+ c(w) * cbind(du0.du1 + y[,1],
+ du0.du2 + y[,2],
+ du0.du12 + y[,1] * y[,2])
+ }),
+ weight = expression({
+ d2u0.du1.2 <- -(exp(u1) + exp(u1 + u2 + u12)) * (1+exp(u2)) / denom^2
+ d2u0.du22 <- -(exp(u2) + exp(u1 + u2 + u12)) * (1+exp(u1)) / denom^2
+ d2u0.du122 <- -exp(u1 + u2 + u12) * (1+exp(u1)+exp(u2)) / denom^2
+ d2u0.du1u2 <- -(exp(u1 + u2 + u12) - exp(u1 + u2)) / denom^2
+ d2u0.du1u3 <- -(1 + exp(u2)) * exp(u1 + u2 + u12) / denom^2
+ d2u0.du2u3 <- -(1 + exp(u1)) * exp(u1 + u2 + u12) / denom^2
+
+ wz <- matrix(as.numeric(NA), n, dimm(M))
+ wz[,iam(1,1,M)] <- -d2u0.du1.2
+ wz[,iam(2,2,M)] <- -d2u0.du22
+ wz[,iam(3,3,M)] <- -d2u0.du122
+ wz[,iam(1,2,M)] <- -d2u0.du1u2
+ wz[,iam(1,3,M)] <- -d2u0.du1u3
+ wz[,iam(2,3,M)] <- -d2u0.du2u3
+ c(w) * wz
+ }))
}
loglinb3 <- function(exchangeable = FALSE, zero = NULL)
{
- new("vglmff",
- blurb = c("Log-linear model for trivariate binary data\n\n",
- "Links: ",
- "Identity: u1, u2, u3, u12, u13, u23",
- "\n"),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
- .exchangeable, constraints,
- intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .exchangeable = exchangeable, .zero = zero ))),
- initialize = expression({
- y <- as.matrix(y)
- predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
-
- if (ncol(y) != 3)
- stop("ncol(y) must be = 3")
-
- extra$my.expression <- expression({
- u1 <- eta[,1]
- u2 <- eta[,2]
- u3 <- eta[,3]
- u12 <- eta[,4]
- u13 <- eta[,5]
- u23 <- eta[,6]
- denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
- exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
- exp(u1 + u2 + u3 + u12 + u13 + u23)
- })
-
-
- extra$deriv.expression <- expression({
- allterms <- exp(u1+u2+u3+u12+u13+u23)
- A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
- allterms
- A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
- allterms
- A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
- allterms
- A12 <- exp(u1 + u2 + u12) + allterms
- A13 <- exp(u1 + u3 + u13) + allterms
- A23 <- exp(u2 + u3 + u23) + allterms
- })
-
-
- if (length(mustart) + length(etastart) == 0) {
- mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
- mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
- mustart[,2] <- weighted.mean((1-y[,1])*(1-y[,2])*y[,3], w)
- mustart[,3] <- weighted.mean((1-y[,1])*y[,2]*(1-y[,3]), w)
- mustart[,4] <- weighted.mean((1-y[,1])*y[,2]*y[,3], w)
- mustart[,5] <- weighted.mean(y[,1]*(1-y[,2])*(1-y[,3]), w)
- mustart[,6] <- weighted.mean(y[,1]*(1-y[,2])*y[,3], w)
- mustart[,7] <- weighted.mean(y[,1]*y[,2]*(1-y[,3]), w)
- mustart[,8] <- weighted.mean(y[,1]*y[,2]*y[,3], w)
- if (any(mustart == 0))
- stop("some combinations of the response not realized")
- }
- }),
- linkinv = function(eta, extra = NULL) {
- eval(extra$my.expression)
- cbind("000" = 1,
- "001" = exp(u3),
- "010" = exp(u2),
- "011" = exp(u2+u3+u23),
- "100" = exp(u1),
- "101" = exp(u1+u3+u13),
- "110" = exp(u1+u2+u12),
- "111" = exp(u1+u2+u3+u12+u13+u23)) / denom
- },
- last = expression({
- misc$link = rep("identity", length = M)
- names(misc$link) = predictors.names
- misc$earg = list(u1 = list(), u2 = list(), u3 = list(),
- u12 = list(), u13 = list(), u23 = list())
- }),
- linkfun = function(mu, extra = NULL) {
- u0 <- log(mu[,1])
- u3 <- log(mu[,2]) - u0
- u2 <- log(mu[,3]) - u0
- u23 <- log(mu[,4]) - u0 - u2 - u3
- u1 <- log(mu[,5]) - u0
- u13 <- log(mu[,6]) - u0 - u1 - u3
- u12 <- log(mu[,7]) - u0 - u1 - u2
- cbind(u1, u2, u3, u12, u13, u23)
- },
- loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- eval(extra$my.expression)
- u0 <- -log(denom)
- if (residuals)
- stop("loglikelihood residuals not implemented yet") else
- sum(w*(u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
- u13*y[,1]*y[,3] + u23*y[,2]*y[,3]))
- },
- vfamily = c("loglinb3"),
- deriv = expression({
- eval(extra$my.expression)
- eval(extra$deriv.expression)
- c(w) * cbind(-A1/denom + y[,1],
- -A2/denom + y[,2],
- -A3/denom + y[,3],
- -A12/denom + y[,1]*y[,2],
- -A13/denom + y[,1]*y[,3],
- -A23/denom + y[,2]*y[,3])
- }),
- weight = expression({
- u0 <- -log(denom)
- dA2.du1 <- exp(u1 + u2 + u12) + allterms
- dA3.du1 <- exp(u1 + u3 + u13) + allterms
- dA3.du2 <- exp(u2 + u3 + u23) + allterms
- wz <- matrix(as.numeric(NA), n, dimm(6))
- expu0 <- exp(u0)
- wz[,iam(1,1,M)] <- A1 * (1 - expu0 * A1)
- wz[,iam(2,2,M)] <- A2 * (1 - expu0 * A2)
- wz[,iam(3,3,M)] <- A3 * (1 - expu0 * A3)
- wz[,iam(1,2,M)] <- (dA2.du1 - expu0 * A1 * A2)
- wz[,iam(1,3,M)] <- (dA3.du1 - expu0 * A1 * A3)
- wz[,iam(2,3,M)] <- (dA3.du2 - expu0 * A2 * A3)
- wz[,iam(4,4,M)] <- A12 * (1 - expu0 * A12)
- wz[,iam(5,5,M)] <- A13 * (1 - expu0 * A13)
- wz[,iam(6,6,M)] <- A23 * (1 - expu0 * A23)
- wz[,iam(4,6,M)] <- (allterms - expu0 * A12 * A23)
- wz[,iam(5,6,M)] <- (allterms - expu0 * A12 * A23)
- wz[,iam(4,5,M)] <- (allterms - expu0 * A12 * A13)
- wz[,iam(1,4,M)] <- A12 * (1 - expu0 * A1)
- wz[,iam(1,5,M)] <- A13 * (1 - expu0 * A1)
- wz[,iam(1,6,M)] <- (allterms - expu0 * A1 * A23)
- wz[,iam(2,4,M)] <- A12 * (1 - expu0 * A2)
- wz[,iam(2,5,M)] <- (allterms - expu0 * A2 * A13)
- wz[,iam(2,6,M)] <- A23 * (1 - expu0 * A2)
- wz[,iam(3,4,M)] <- (allterms - expu0 * A3 * A12)
- wz[,iam(3,5,M)] <- A13 * (1 - expu0 * A3)
- wz[,iam(3,6,M)] <- A23 * (1 - expu0 * A3)
- wz <- expu0 * wz
- c(w) * wz
- }))
+ new("vglmff",
+ blurb = c("Log-linear model for trivariate binary data\n\n",
+ "Links: ",
+ "Identity: u1, u2, u3, u12, u13, u23",
+ "\n"),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(c(1,1,1,0,0,0, 0,0,0,1,1,1), 6, 2), x,
+ .exchangeable, constraints,
+ intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .exchangeable = exchangeable, .zero = zero ))),
+ initialize = expression({
+ predictors.names <- c("u1", "u2", "u3", "u12", "u13", "u23")
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 3,
+ out.wy = TRUE,
+ colsyperw = 3,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+ if (ncol(y) != 3)
+ stop("ncol(y) must be = 3")
+
+ extra$my.expression <- expression({
+ u1 <- eta[,1]
+ u2 <- eta[,2]
+ u3 <- eta[,3]
+ u12 <- eta[,4]
+ u13 <- eta[,5]
+ u23 <- eta[,6]
+ denom <- 1 + exp(u1) + exp(u2) + exp(u3) + exp(u1 + u2 + u12) +
+ exp(u1 + u3 + u13) + exp(u2 + u3 + u23) +
+ exp(u1 + u2 + u3 + u12 + u13 + u23)
+ })
+
+
+ extra$deriv.expression <- expression({
+ allterms <- exp(u1+u2+u3+u12+u13+u23)
+ A1 <- exp(u1) + exp(u1 + u2 + u12) + exp(u1 + u3 + u13) +
+ allterms
+ A2 <- exp(u2) + exp(u1 + u2 + u12) + exp(u2 + u3 + u23) +
+ allterms
+ A3 <- exp(u3) + exp(u3 + u2 + u23) + exp(u1 + u3 + u13) +
+ allterms
+ A12 <- exp(u1 + u2 + u12) + allterms
+ A13 <- exp(u1 + u3 + u13) + allterms
+ A23 <- exp(u2 + u3 + u23) + allterms
+ })
+
+
+ if (length(mustart) + length(etastart) == 0) {
+ mustart <- matrix(as.numeric(NA), nrow(y), 2^3)
+ mustart[,1] <- weighted.mean((1-y[,1])*(1-y[,2])*(1-y[,3]), w)
+ mustart[,2] <- weighted.mean((1-y[,1])*(1-y[,2])*y[,3], w)
+ mustart[,3] <- weighted.mean((1-y[,1])*y[,2]*(1-y[,3]), w)
+ mustart[,4] <- weighted.mean((1-y[,1])*y[,2]*y[,3], w)
+ mustart[,5] <- weighted.mean(y[,1]*(1-y[,2])*(1-y[,3]), w)
+ mustart[,6] <- weighted.mean(y[,1]*(1-y[,2])*y[,3], w)
+ mustart[,7] <- weighted.mean(y[,1]*y[,2]*(1-y[,3]), w)
+ mustart[,8] <- weighted.mean(y[,1]*y[,2]*y[,3], w)
+ if (any(mustart == 0))
+ stop("some combinations of the response not realized")
+ }
+ }),
+ linkinv = function(eta, extra = NULL) {
+ eval(extra$my.expression)
+ cbind("000" = 1,
+ "001" = exp(u3),
+ "010" = exp(u2),
+ "011" = exp(u2+u3+u23),
+ "100" = exp(u1),
+ "101" = exp(u1+u3+u13),
+ "110" = exp(u1+u2+u12),
+ "111" = exp(u1+u2+u3+u12+u13+u23)) / denom
+ },
+ last = expression({
+ misc$link = rep("identity", length = M)
+ names(misc$link) = predictors.names
+
+ misc$earg = list(u1 = list(), u2 = list(), u3 = list(),
+ u12 = list(), u13 = list(), u23 = list())
+
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+
+ }),
+ linkfun = function(mu, extra = NULL) {
+ u0 <- log(mu[,1])
+ u3 <- log(mu[,2]) - u0
+ u2 <- log(mu[,3]) - u0
+ u23 <- log(mu[,4]) - u0 - u2 - u3
+ u1 <- log(mu[,5]) - u0
+ u13 <- log(mu[,6]) - u0 - u1 - u3
+ u12 <- log(mu[,7]) - u0 - u1 - u2
+ cbind(u1, u2, u3, u12, u13, u23)
+ },
+ loglikelihood = function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ eval(extra$my.expression)
+ u0 <- -log(denom)
+ if (residuals)
+ stop("loglikelihood residuals not implemented yet") else
+ sum(c(w) *(u0 + u1*y[,1] + u2*y[,2] + u3*y[,3] +u12*y[,1]*y[,2] +
+ u13*y[,1]*y[,3] + u23*y[,2]*y[,3]))
+ },
+ vfamily = c("loglinb3"),
+ deriv = expression({
+ eval(extra$my.expression)
+ eval(extra$deriv.expression)
+ c(w) * cbind(-A1/denom + y[,1],
+ -A2/denom + y[,2],
+ -A3/denom + y[,3],
+ -A12/denom + y[,1]*y[,2],
+ -A13/denom + y[,1]*y[,3],
+ -A23/denom + y[,2]*y[,3])
+ }),
+ weight = expression({
+ u0 <- -log(denom)
+ dA2.du1 <- exp(u1 + u2 + u12) + allterms
+ dA3.du1 <- exp(u1 + u3 + u13) + allterms
+ dA3.du2 <- exp(u2 + u3 + u23) + allterms
+
+ wz <- matrix(as.numeric(NA), n, dimm(6))
+ expu0 <- exp(u0)
+
+ wz[,iam(1,1,M)] <- A1 * (1 - expu0 * A1)
+ wz[,iam(2,2,M)] <- A2 * (1 - expu0 * A2)
+ wz[,iam(3,3,M)] <- A3 * (1 - expu0 * A3)
+ wz[,iam(1,2,M)] <- (dA2.du1 - expu0 * A1 * A2)
+ wz[,iam(1,3,M)] <- (dA3.du1 - expu0 * A1 * A3)
+ wz[,iam(2,3,M)] <- (dA3.du2 - expu0 * A2 * A3)
+ wz[,iam(4,4,M)] <- A12 * (1 - expu0 * A12)
+ wz[,iam(5,5,M)] <- A13 * (1 - expu0 * A13)
+ wz[,iam(6,6,M)] <- A23 * (1 - expu0 * A23)
+ wz[,iam(4,6,M)] <- (allterms - expu0 * A12 * A23)
+ wz[,iam(5,6,M)] <- (allterms - expu0 * A12 * A23)
+ wz[,iam(4,5,M)] <- (allterms - expu0 * A12 * A13)
+ wz[,iam(1,4,M)] <- A12 * (1 - expu0 * A1)
+ wz[,iam(1,5,M)] <- A13 * (1 - expu0 * A1)
+ wz[,iam(1,6,M)] <- (allterms - expu0 * A1 * A23)
+ wz[,iam(2,4,M)] <- A12 * (1 - expu0 * A2)
+ wz[,iam(2,5,M)] <- (allterms - expu0 * A2 * A13)
+ wz[,iam(2,6,M)] <- A23 * (1 - expu0 * A2)
+ wz[,iam(3,4,M)] <- (allterms - expu0 * A3 * A12)
+ wz[,iam(3,5,M)] <- A13 * (1 - expu0 * A3)
+ wz[,iam(3,6,M)] <- A23 * (1 - expu0 * A3)
+ wz <- expu0 * wz
+ c(w) * wz
+ }))
}
+
diff --git a/R/family.mixture.R b/R/family.mixture.R
index 9dae6f3..8ce3179 100644
--- a/R/family.mixture.R
+++ b/R/family.mixture.R
@@ -8,371 +8,461 @@
-mix2normal1.control <- function(trace = TRUE, ...)
-{
- list(trace=trace)
+
+
+
+
+
+mix2normal1.control <- function(trace = TRUE, ...) {
+ list(trace = trace)
}
-mix2normal1 = function(lphi = "logit",
- lmu = "identity",
- lsd = "loge",
- ephi = list(), emu1 = list(), emu2 = list(),
- esd1 = list(), esd2 = list(),
- iphi=0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
- qmu = c(0.2, 0.8),
- equalsd = TRUE,
- nsimEIM = 100,
- zero = 1)
+ mix2normal1 <-
+ function(lphi = "logit",
+ lmu = "identity",
+ lsd = "loge",
+ iphi = 0.5,
+ imu1 = NULL, imu2 = NULL,
+ isd1 = NULL, isd2 = NULL,
+ qmu = c(0.2, 0.8),
+ equalsd = TRUE,
+ nsimEIM = 100,
+ zero = 1)
{
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (!is.Numeric(qmu, allowable.length = 2,
- positive = TRUE) ||
- any(qmu >= 1))
- stop("bad input for argument 'qmu'")
- if (length(iphi) &&
- (!is.Numeric(iphi, allowable.length = 1,
- positive = TRUE) ||
- iphi>= 1))
- stop("bad input for argument 'iphi'")
- if (length(imu1) && !is.Numeric(imu1))
- stop("bad input for argument 'imu1'")
- if (length(imu2) && !is.Numeric(imu2))
- stop("bad input for argument 'imu2'")
- if (length(isd1) && !is.Numeric(isd1, positive = TRUE))
- stop("bad input for argument 'isd1'")
- if (length(isd2) && !is.Numeric(isd2, positive = TRUE))
- stop("bad input for argument 'isd2'")
- if (!is.list(ephi)) ephi = list()
- if (!is.list(emu1)) emu1 = list()
- if (!is.list(emu2)) emu2 = list()
- if (!is.list(esd1)) esd1 = list()
- if (!is.list(esd2)) esd2 = list()
- if (!is.logical(equalsd) || length(equalsd) != 1)
- stop("bad input for argument 'equalsd'")
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
-
- new("vglmff",
- blurb = c("Mixture of two univariate normals\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
- namesof("mu1", lmu, earg = emu1, tag = FALSE), ", ",
- namesof("sd1", lsd, earg = esd1, tag = FALSE), ", ",
- namesof("mu2", lmu, earg = emu2, tag = FALSE), ", ",
- namesof("sd2", lsd, earg = esd2, tag = FALSE), "\n",
- "Mean: phi*mu1 + (1-phi)*mu2\n",
- "Variance: phi*sd1^2 + (1-phi)*sd2^2 + phi*(1-phi)*(mu1-mu2)^2"),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(rbind(diag(4), c(0,0,1,0)), x, .equalsd,
- constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero=zero, .equalsd = equalsd ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names = c(
- namesof("phi", .lphi, tag = FALSE),
- namesof("mu1", .lmu, earg = .emu1, tag = FALSE),
- namesof("sd1", .lsd, earg = .esd1, tag = FALSE),
- namesof("mu2", .lmu, earg = .emu2, tag = FALSE),
- namesof("sd2", .lsd, earg = .esd2, tag = FALSE))
- if (!length(etastart)) {
- qy = quantile(y, prob = .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.mu1 = rep(if(length(.imu1)) .imu1 else qy[1], length = n)
- init.mu2 = rep(if(length(.imu2)) .imu2 else qy[2], length = n)
- ind.1 = if (init.mu1[1] < init.mu2[1]) 1:round(n* init.phi[1]) else
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+
+ emu1 <- emu2 <- emu
+ esd1 <- esd2 <- esd
+
+
+ if (!is.Numeric(qmu, allowable.length = 2,
+ positive = TRUE) ||
+ any(qmu >= 1))
+ stop("bad input for argument 'qmu'")
+
+
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1,
+ positive = TRUE) ||
+ iphi>= 1))
+ stop("bad input for argument 'iphi'")
+ if (length(imu1) && !is.Numeric(imu1))
+ stop("bad input for argument 'imu1'")
+ if (length(imu2) && !is.Numeric(imu2))
+ stop("bad input for argument 'imu2'")
+ if (length(isd1) && !is.Numeric(isd1, positive = TRUE))
+ stop("bad input for argument 'isd1'")
+ if (length(isd2) && !is.Numeric(isd2, positive = TRUE))
+ stop("bad input for argument 'isd2'")
+
+
+ if (!is.logical(equalsd) || length(equalsd) != 1)
+ stop("bad input for argument 'equalsd'")
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("'nsimEIM' should be an integer greater than 10")
+
+
+ new("vglmff",
+ blurb = c("Mixture of two univariate normals\n\n",
+ "Links: ",
+ namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
+ namesof("mu1", lmu, earg = emu1, tag = FALSE), ", ",
+ namesof("sd1", lsd, earg = esd1, tag = FALSE), ", ",
+ namesof("mu2", lmu, earg = emu2, tag = FALSE), ", ",
+ namesof("sd2", lsd, earg = esd2, tag = FALSE), "\n",
+ "Mean: phi*mu1 + (1 - phi)*mu2\n",
+ "Variance: phi*sd1^2 + (1 - phi)*sd2^2 + ",
+ "phi*(1 - phi)*(mu1-mu2)^2"),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(rbind(diag(4), c(0,0, 1,0)), x, .equalsd,
+ constraints, intercept.apply = TRUE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero, .equalsd = equalsd ))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names = c(
+ namesof("phi", .lphi, tag = FALSE),
+ namesof("mu1", .lmu, earg = .emu1, tag = FALSE),
+ namesof("sd1", .lsd, earg = .esd1, tag = FALSE),
+ namesof("mu2", .lmu, earg = .emu2, tag = FALSE),
+ namesof("sd2", .lsd, earg = .esd2, tag = FALSE))
+
+
+
+ if (!length(etastart)) {
+ qy = quantile(y, prob = .qmu)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.mu1 = rep(if(length(.imu1)) .imu1 else qy[1], length = n)
+ init.mu2 = rep(if(length(.imu2)) .imu2 else qy[2], length = n)
+ ind.1 = if (init.mu1[1] < init.mu2[1])
+ 1:round(n* init.phi[1]) else
round(n* init.phi[1]):n
- ind.2 = if (init.mu1[1] < init.mu2[1]) round(n* init.phi[1]):n else
+ ind.2 = if (init.mu1[1] < init.mu2[1])
+ round(n* init.phi[1]):n else
1:round(n* init.phi[1])
- sorty = sort(y)
- init.sd1 = rep(if(length(.isd1)) .isd1 else sd(sorty[ind.1]), len=n)
- init.sd2 = rep(if(length(.isd2)) .isd2 else sd(sorty[ind.2]), len=n)
- if ( .equalsd ) {
- init.sd1 = init.sd2 = (init.sd1 + init.sd2)/2
- if (!all.equal( .esd1, .esd2 ))
- stop("'esd1' and 'esd2' must be equal if equalsd = TRUE")
- }
- etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.mu1, .lmu, earg = .emu1),
- theta2eta(init.sd1, .lsd, earg = .esd1),
- theta2eta(init.mu2, .lmu, earg = .emu2),
- theta2eta(init.sd2, .lsd, earg = .esd2))
- }
- }), list(.lphi=lphi, .lmu=lmu, .iphi=iphi, .imu1=imu1, .imu2=imu2,
- .ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2,
- .equalsd=equalsd,
- .lsd=lsd, .isd1=isd1, .isd2=isd2, .qmu=qmu))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[,2], link = .lmu, earg = .emu1)
- mu2 = eta2theta(eta[,4], link = .lmu, earg = .emu2)
- phi*mu1 + (1-phi)*mu2
- }, list(.lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2 ))),
- last = eval(substitute(expression({
- misc$link = c("phi"= .lphi, "mu1"= .lmu,
- "sd1"= .lsd, "mu2"= .lmu, "sd2"= .lsd)
- misc$earg = list("phi"= .ephi, "mu1"= .emu1,
- "sd1"= .esd1, "mu2"= .emu2, "sd2"= .esd2)
- misc$expected = TRUE
- misc$equalsd = .equalsd
- misc$nsimEIM = .nsimEIM
- }), list(.lphi=lphi, .lmu=lmu, .lsd=lsd, .equalsd=equalsd,
- .ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2,
- .nsimEIM=nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[,2], link = .lmu, earg = .emu1)
- sd1 = eta2theta(eta[,3], link = .lsd, earg = .esd1)
- mu2 = eta2theta(eta[,4], link = .lmu, earg = .emu2)
- sd2 = eta2theta(eta[,5], link = .lsd, earg = .esd2)
- f1 = dnorm(y, mean=mu1, sd=sd1)
- f2 = dnorm(y, mean=mu2, sd=sd2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1-phi)*f2))
- }, list(.lphi=lphi, .lmu=lmu,
- .ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2,
- .lsd=lsd ))),
- vfamily = c("mix2normal1"),
- deriv = eval(substitute(expression({
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- mu1 = eta2theta(eta[,2], link = .lmu, earg = .emu1)
- sd1 = eta2theta(eta[,3], link = .lsd, earg = .esd1)
- mu2 = eta2theta(eta[,4], link = .lmu, earg = .emu2)
- sd2 = eta2theta(eta[,5], link = .lsd, earg = .esd2)
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dmu1.deta = dtheta.deta(mu1, link = .lmu, earg = .emu1)
- dmu2.deta = dtheta.deta(mu2, link = .lmu, earg = .emu2)
- dsd1.deta = dtheta.deta(sd1, link = .lsd, earg = .esd1)
- dsd2.deta = dtheta.deta(sd2, link = .lsd, earg = .esd2)
- f1 = dnorm(y, mean=mu1, sd=sd1)
- f2 = dnorm(y, mean=mu2, sd=sd2)
- pdf = phi*f1 + (1-phi)*f2
- z1 = (y-mu1) / sd1
- z2 = (y-mu2) / sd2
- df1.dmu1 = z1 * f1 / sd1
- df2.dmu2 = z2 * f2 / sd2
- df1.dsd1 = (z1^2 - 1) * f1 / sd1
- df2.dsd2 = (z2^2 - 1) * f2 / sd2
- dl.dphi = (f1-f2) / pdf
- dl.dmu1 = phi * df1.dmu1 / pdf
- dl.dmu2 = (1-phi) * df2.dmu2 / pdf
- dl.dsd1 = phi * df1.dsd1 / pdf
- dl.dsd2 = (1-phi) * df2.dsd2 / pdf
- c(w) * cbind(dl.dphi * dphi.deta,
- dl.dmu1 * dmu1.deta,
- dl.dsd1 * dsd1.deta,
- dl.dmu2 * dmu2.deta,
- dl.dsd2 * dsd2.deta)
- }), list(.lphi=lphi, .lmu=lmu, .lsd=lsd,
- .ephi=ephi, .emu1=emu1, .emu2=emu2, .esd1=esd1, .esd2=esd2,
- .nsimEIM=nsimEIM ))),
- weight = eval(substitute(expression({
-
- d3 = deriv3(~ log(
- phi * dnorm((ysim-mu1)/sd1) / sd1 +
- (1-phi) * dnorm((ysim-mu2)/sd2) / sd2),
- c("phi","mu1","sd1","mu2","sd2"), hessian= TRUE)
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rnorm(n,mu1,sd1), rnorm(n,mu2,sd2))
-
- eval.d3 = eval(d3)
- d2l.dthetas2 = attr(eval.d3, "hessian")
- rm(ysim)
-
- temp3 = matrix(0, n, dimm(M))
- for(ss in 1:M)
- for(tt in ss:M)
- temp3[,iam(ss,tt,M)] = -d2l.dthetas2[,ss,tt]
-
- run.mean = ((ii-1) * run.mean + temp3) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else run.mean
-
- dtheta.detas = cbind(dphi.deta,
- dmu1.deta,
- dsd1.deta,
- dmu2.deta,
- dsd2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
- c(w) * wz
- }), list(.lphi=lphi, .lmu=lmu, .nsimEIM=nsimEIM ))))
+ sorty = sort(y)
+ init.sd1 = rep(if(length( .isd1 )) .isd1 else sd(sorty[ind.1]),
+ len = n)
+ init.sd2 = rep(if(length( .isd2 )) .isd2 else sd(sorty[ind.2]),
+ len = n)
+ if ( .equalsd ) {
+ init.sd1 = init.sd2 = (init.sd1 + init.sd2)/2
+ if (!all.equal( .esd1, .esd2 ))
+ stop("'esd1' and 'esd2' must be equal if 'equalsd = TRUE'")
+ }
+ etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
+ theta2eta(init.mu1, .lmu, earg = .emu1),
+ theta2eta(init.sd1, .lsd, earg = .esd1),
+ theta2eta(init.mu2, .lmu, earg = .emu2),
+ theta2eta(init.sd2, .lsd, earg = .esd2))
+ }
+ }), list(.lphi = lphi, .lmu = lmu,
+ .iphi = iphi, .imu1 = imu1, .imu2 = imu2,
+ .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+ .esd1 = esd1, .esd2 = esd2, .equalsd = equalsd,
+ .lsd = lsd, .isd1 = isd1, .isd2 = isd2, .qmu = qmu))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ phi * mu1 + (1 - phi) * mu2
+ }, list( .lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+ .esd1 = esd1, .esd2 = esd2 ))),
+ last = eval(substitute(expression({
+ misc$link = c("phi" = .lphi, "mu1" = .lmu,
+ "sd1" = .lsd, "mu2" = .lmu, "sd2" = .lsd)
+
+ misc$earg = list("phi" = .ephi, "mu1" = .emu1,
+ "sd1" = .esd1, "mu2" = .emu2, "sd2" = .esd2)
+
+ misc$expected = TRUE
+ misc$equalsd = .equalsd
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list(.lphi = lphi, .lmu = lmu, .lsd = lsd, .equalsd = equalsd,
+ .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+ .esd1 = esd1, .esd2 = esd2,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ sd1 = eta2theta(eta[, 3], link = .lsd, earg = .esd1)
+ mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ sd2 = eta2theta(eta[, 5], link = .lsd, earg = .esd2)
+ f1 = dnorm(y, mean=mu1, sd=sd1)
+ f2 = dnorm(y, mean=mu2, sd=sd2)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * log(phi*f1 + (1 - phi)*f2))
+ }, list(.lphi = lphi, .lmu = lmu,
+ .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+ .esd1 = esd1, .esd2 = esd2,
+ .lsd = lsd ))),
+ vfamily = c("mix2normal1"),
+ deriv = eval(substitute(expression({
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ mu1 = eta2theta(eta[, 2], link = .lmu, earg = .emu1)
+ sd1 = eta2theta(eta[, 3], link = .lsd, earg = .esd1)
+ mu2 = eta2theta(eta[, 4], link = .lmu, earg = .emu2)
+ sd2 = eta2theta(eta[, 5], link = .lsd, earg = .esd2)
+ dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dmu1.deta = dtheta.deta(mu1, link = .lmu, earg = .emu1)
+ dmu2.deta = dtheta.deta(mu2, link = .lmu, earg = .emu2)
+ dsd1.deta = dtheta.deta(sd1, link = .lsd, earg = .esd1)
+ dsd2.deta = dtheta.deta(sd2, link = .lsd, earg = .esd2)
+ f1 = dnorm(y, mean=mu1, sd=sd1)
+ f2 = dnorm(y, mean=mu2, sd=sd2)
+ pdf = phi*f1 + (1 - phi)*f2
+ z1 = (y-mu1) / sd1
+ z2 = (y-mu2) / sd2
+ df1.dmu1 = z1 * f1 / sd1
+ df2.dmu2 = z2 * f2 / sd2
+ df1.dsd1 = (z1^2 - 1) * f1 / sd1
+ df2.dsd2 = (z2^2 - 1) * f2 / sd2
+ dl.dphi = (f1-f2) / pdf
+ dl.dmu1 = phi * df1.dmu1 / pdf
+ dl.dmu2 = (1 - phi) * df2.dmu2 / pdf
+ dl.dsd1 = phi * df1.dsd1 / pdf
+ dl.dsd2 = (1 - phi) * df2.dsd2 / pdf
+ c(w) * cbind(dl.dphi * dphi.deta,
+ dl.dmu1 * dmu1.deta,
+ dl.dsd1 * dsd1.deta,
+ dl.dmu2 * dmu2.deta,
+ dl.dsd2 * dsd2.deta)
+ }), list(.lphi = lphi, .lmu = lmu, .lsd = lsd,
+ .ephi = ephi, .emu1 = emu1, .emu2 = emu2,
+ .esd1 = esd1, .esd2 = esd2,
+ .nsimEIM = nsimEIM ))),
+ weight = eval(substitute(expression({
+
+ d3 = deriv3(~ log(
+ phi * dnorm((ysim-mu1)/sd1) / sd1 +
+ (1 - phi) * dnorm((ysim-mu2)/sd2) / sd2),
+ c("phi","mu1","sd1","mu2","sd2"), hessian= TRUE)
+ run.mean = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = ifelse(runif(n) < phi, rnorm(n, mu1, sd1),
+ rnorm(n, mu2, sd2))
+
+ eval.d3 = eval(d3)
+ d2l.dthetas2 = attr(eval.d3, "hessian")
+ rm(ysim)
+
+ temp3 = matrix(0, n, dimm(M))
+ for(ss in 1:M)
+ for(tt in ss:M)
+ temp3[,iam(ss,tt, M)] = -d2l.dthetas2[,ss,tt]
+
+ run.mean = ((ii-1) * run.mean + temp3) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
+
+ dtheta.detas = cbind(dphi.deta,
+ dmu1.deta,
+ dsd1.deta,
+ dmu2.deta,
+ dsd2.deta)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz = wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
+ c(w) * wz
+ }), list(.lphi = lphi, .lmu = lmu, .nsimEIM = nsimEIM ))))
}
-mix2poisson.control <- function(trace = TRUE, ...)
-{
- list(trace=trace)
+mix2poisson.control <- function(trace = TRUE, ...) {
+ list(trace = trace)
}
-mix2poisson = function(lphi = "logit", llambda = "loge",
- ephi = list(), el1 = list(), el2 = list(),
- iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
+ mix2poisson <- function(lphi = "logit", llambda = "loge",
+ iphi = 0.5, il1 = NULL, il2 = NULL,
+ qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
{
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
-
- if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
- any(qmu >= 1))
- stop("bad input for argument 'qmu'")
- if (length(iphi) &&
- (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
- iphi >= 1))
- stop("bad input for argument 'iphi'")
- if (length(il1) && !is.Numeric(il1))
- stop("bad input for argument 'il1'")
- if (length(il2) && !is.Numeric(il2))
- stop("bad input for argument 'il2'")
-
- if (!is.list(ephi)) ephi = list()
- if (!is.list(el1)) el1 = list()
- if (!is.list(el2)) el2 = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
-
- new("vglmff",
- blurb = c("Mixture of two Poisson distributions\n\n",
- "Links: ",
- namesof("phi",lphi, earg = ephi), ", ",
- namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
- namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
- "Mean: phi*lambda1 + (1-phi)*lambda2"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero ))),
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names = c(namesof("phi", .lphi, earg = .ephi, tag = FALSE),
- namesof("lambda1", .llambda, earg = .el1, tag = FALSE),
- namesof("lambda2", .llambda, earg = .el2, tag = FALSE))
- if (!length(etastart)) {
- qy = quantile(y, prob= .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 = rep(if(length(.il1)) .il1 else qy[1], length = n)
- init.lambda2 = rep(if(length(.il2)) .il2 else qy[2], length = n)
- if (!length(etastart))
- etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.lambda1, .llambda, earg = .el1),
- theta2eta(init.lambda2, .llambda, earg = .el2))
- }
- }), list(.lphi=lphi, .llambda=llambda, .iphi=iphi, .il1=il1, .il2=il2,
- .ephi=ephi, .el1=el1, .el2=el2,
- .qmu=qmu))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- phi*lambda1 + (1-phi)*lambda2
- }, list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2 ))),
- last = eval(substitute(expression({
- misc$link = c("phi"= .lphi, "lambda1"= .llambda, "lambda2"= .llambda)
- misc$earg = list("phi"= .ephi, "lambda1"= .el1, "lambda2"= .el2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2, .nsimEIM=nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- f1 = dpois(y, lam=lambda1)
- f2 = dpois(y, lam=lambda2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1-phi)*f2))
- }, list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2 ))),
- vfamily = c("mix2poisson"),
- deriv = eval(substitute(expression({
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
- f1 = dpois(x=y, lam=lambda1)
- f2 = dpois(x=y, lam=lambda2)
- pdf = phi*f1 + (1-phi)*f2
- df1.dlambda1 = dpois(y-1, lam=lambda1) - f1
- df2.dlambda2 = dpois(y-1, lam=lambda2) - f2
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1-phi) * df2.dlambda2 / pdf
- c(w) * cbind(dl.dphi * dphi.deta,
- dl.dlambda1 * dlambda1.deta,
- dl.dlambda2 * dlambda2.deta)
- }), list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2, .nsimEIM=nsimEIM ))),
- weight = eval(substitute(expression({
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rpois(n,lambda1), rpois(n,lambda2))
- f1 = dpois(x=ysim, lam=lambda1)
- f2 = dpois(x=ysim, lam=lambda2)
- pdf = phi*f1 + (1-phi)*f2
- df1.dlambda1 = dpois(ysim-1, lam=lambda1) - f1
- df2.dlambda2 = dpois(ysim-1, lam=lambda2) - f2
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1-phi) * df2.dlambda2 / pdf
- d2f1.dlambda12 = dpois(ysim-2,lambda1) - 2*dpois(ysim-1,lambda1) +
- dpois(ysim,lambda1)
- d2f2.dlambda22 = dpois(ysim-2,lambda2) - 2*dpois(ysim-1,lambda2) +
- dpois(ysim,lambda2)
- d2l.dphi2 = dl.dphi^2
- d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
- d2f1.dlambda12) / pdf
- d2l.dlambda22 = (1-phi) * ((1-phi) * df2.dlambda2^2 / pdf -
- d2f2.dlambda22) / pdf
- d2l.dlambda1lambda2 = phi * (1-phi) *
- df1.dlambda1 * df2.dlambda2 / pdf^2
- d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
- d2l.dphilambda2 = df2.dlambda2 * ((1-phi)*(f1-f2)/pdf - 1) / pdf
-
- rm(ysim)
- temp3 = matrix(0, n, dimm(M))
- temp3[,iam(1,1,M=3)] = d2l.dphi2
- temp3[,iam(2,2,M=3)] = d2l.dlambda12
- temp3[,iam(3,3,M=3)] = d2l.dlambda22
- temp3[,iam(1,2,M=3)] = d2l.dphilambda1
- temp3[,iam(1,3,M=3)] = d2l.dphilambda2
- temp3[,iam(2,3,M=3)] = d2l.dlambda1lambda2
- run.mean = ((ii-1) * run.mean + temp3) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else run.mean
-
- dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
- c(w) * wz
- }), list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2, .nsimEIM=nsimEIM ))))
+
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+ el1 <- el2 <- elambda
+
+
+
+ if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+ any(qmu >= 1))
+ stop("bad input for argument 'qmu'")
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
+ iphi >= 1))
+ stop("bad input for argument 'iphi'")
+ if (length(il1) && !is.Numeric(il1))
+ stop("bad input for argument 'il1'")
+ if (length(il2) && !is.Numeric(il2))
+ stop("bad input for argument 'il2'")
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("'nsimEIM' should be an integer greater than 10")
+
+
+ new("vglmff",
+ blurb = c("Mixture of two Poisson distributions\n\n",
+ "Links: ",
+ namesof("phi",lphi, earg = ephi), ", ",
+ namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
+ namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
+ "Mean: phi*lambda1 + (1 - phi)*lambda2"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+ predictors.names =
+ c(namesof("phi", .lphi , earg = .ephi , tag = FALSE),
+ namesof("lambda1", .llambda , earg = .el1 , tag = FALSE),
+ namesof("lambda2", .llambda , earg = .el2 , tag = FALSE))
+
+ if (!length(etastart)) {
+ qy = quantile(y, prob = .qmu)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 = rep(if(length(.il1)) .il1 else qy[1], length = n)
+ init.lambda2 = rep(if(length(.il2)) .il2 else qy[2], length = n)
+
+ if (!length(etastart))
+ etastart = cbind(theta2eta(init.phi, .lphi , earg = .ephi ),
+ theta2eta(init.lambda1, .llambda , earg = .el1 ),
+ theta2eta(init.lambda2, .llambda , earg = .el2 ))
+ }
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .iphi = iphi, .il1 = il1, .il2 = il2,
+ .qmu = qmu))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ phi = eta2theta(eta[, 1], link = .lphi , earg = .ephi )
+ lambda1 = eta2theta(eta[, 2], link = .llambda , earg = .el1 )
+ lambda2 = eta2theta(eta[, 3], link = .llambda , earg = .el2 )
+ phi * lambda1 + (1 - phi) * lambda2
+ }, list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ last = eval(substitute(expression({
+ misc$link =
+ c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda )
+
+ misc$earg =
+ list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2 )
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ f1 = dpois(y, lam = lambda1)
+ f2 = dpois(y, lam = lambda2)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * log(phi*f1 + (1 - phi)*f2))
+ }, list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ vfamily = c("mix2poisson"),
+ deriv = eval(substitute(expression({
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+
+ dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
+ dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
+
+ f1 = dpois(x = y, lam = lambda1)
+ f2 = dpois(x = y, lam = lambda2)
+ pdf = phi*f1 + (1 - phi)*f2
+ df1.dlambda1 = dpois(y-1, lam = lambda1) - f1
+ df2.dlambda2 = dpois(y-1, lam = lambda2) - f2
+ dl.dphi = (f1-f2) / pdf
+ dl.dlambda1 = phi * df1.dlambda1 / pdf
+ dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+
+ c(w) * cbind(dl.dphi * dphi.deta,
+ dl.dlambda1 * dlambda1.deta,
+ dl.dlambda2 * dlambda2.deta)
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .nsimEIM = nsimEIM ))),
+ weight = eval(substitute(expression({
+ run.mean = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = ifelse(runif(n) < phi, rpois(n, lambda1),
+ rpois(n, lambda2))
+ f1 = dpois(x = ysim, lam = lambda1)
+ f2 = dpois(x = ysim, lam = lambda2)
+ pdf = phi*f1 + (1 - phi)*f2
+
+ df1.dlambda1 = dpois(ysim-1, lam = lambda1) - f1
+ df2.dlambda2 = dpois(ysim-1, lam = lambda2) - f2
+
+ dl.dphi = (f1 - f2) / pdf
+ dl.dlambda1 = phi * df1.dlambda1 / pdf
+ dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+
+ d2f1.dlambda12 = dpois(ysim-2, lambda1) -
+ 2*dpois(ysim-1, lambda1) +
+ dpois(ysim, lambda1)
+ d2f2.dlambda22 = dpois(ysim-2, lambda2) -
+ 2*dpois(ysim-1, lambda2) +
+ dpois(ysim, lambda2)
+ d2l.dphi2 = dl.dphi^2
+ d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
+ d2f1.dlambda12) / pdf
+ d2l.dlambda22 = (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
+ d2f2.dlambda22) / pdf
+ d2l.dlambda1lambda2 = phi * (1 - phi) *
+ df1.dlambda1 * df2.dlambda2 / pdf^2
+ d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda2 = df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
+
+ rm(ysim)
+ temp3 = matrix(0, n, dimm(M))
+ temp3[,iam(1, 1, M = 3)] = d2l.dphi2
+ temp3[,iam(2, 2, M = 3)] = d2l.dlambda12
+ temp3[,iam(3, 3, M = 3)] = d2l.dlambda22
+ temp3[,iam(1, 2, M = 3)] = d2l.dphilambda1
+ temp3[,iam(1, 3, M = 3)] = d2l.dphilambda2
+ temp3[,iam(2, 3, M = 3)] = d2l.dlambda1lambda2
+ run.mean = ((ii-1) * run.mean + temp3) / ii
+ }
+
+ wz = if (intercept.only)
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
+
+ dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz = wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
+
+ c(w) * wz
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .nsimEIM = nsimEIM ))))
}
@@ -380,161 +470,197 @@ mix2poisson = function(lphi = "logit", llambda = "loge",
mix2exp.control <- function(trace = TRUE, ...) {
- list(trace = trace)
+ list(trace = trace)
}
-mix2exp = function(lphi = "logit", llambda = "loge",
- ephi = list(), el1 = list(), el2 = list(),
- iphi=0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
+ mix2exp <- function(lphi = "logit", llambda = "loge",
+ iphi = 0.5, il1 = NULL, il2 = NULL,
+ qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
{
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
-
- if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
- any(qmu >= 1))
- stop("bad input for argument 'qmu'")
- if (length(iphi) &&
- (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
- iphi >= 1))
- stop("bad input for argument 'iphi'")
- if (length(il1) && !is.Numeric(il1))
- stop("bad input for argument 'il1'")
- if (length(il2) && !is.Numeric(il2))
- stop("bad input for argument 'il2'")
-
- if (!is.list(ephi)) ephi = list()
- if (!is.list(el1)) el1 = list()
- if (!is.list(el2)) el2 = list()
-
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
- nsimEIM <= 10)
- stop("'nsimEIM' should be an integer greater than 10")
-
-
- new("vglmff",
- blurb = c("Mixture of two univariate exponentials\n\n",
- "Links: ",
- namesof("phi", lphi, earg = ephi), ", ",
- namesof("lambda1", llambda, earg = el1, tag = FALSE), ", ",
- namesof("lambda2", llambda, earg = el2, tag = FALSE), "\n",
- "Mean: phi/lambda1 + (1-phi)/lambda2\n"),
-
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero ))),
-
- initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names = c(namesof("phi", .lphi, earg = .ephi, tag = FALSE),
- namesof("lambda1", .llambda, earg = .el1,tag = FALSE),
- namesof("lambda2", .llambda, earg = .el2,tag = FALSE))
- if (!length(etastart)) {
- qy = quantile(y, prob= .qmu)
- init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
- init.lambda1 = rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
- init.lambda2 = rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
- if (!length(etastart))
- etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
- theta2eta(init.lambda1, .llambda, earg = .el1),
- theta2eta(init.lambda2, .llambda, earg = .el2))
- }
- }), list(.lphi=lphi, .llambda=llambda, .iphi=iphi, .il1=il1, .il2=il2,
- .ephi=ephi, .el1=el1, .el2=el2,
- .qmu=qmu))),
- linkinv = eval(substitute(function(eta, extra = NULL){
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- phi/lambda1 + (1-phi)/lambda2
- }, list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2 ))),
- last = eval(substitute(expression({
- misc$link =
- c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda)
- misc$earg =
- list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list(.lphi=lphi, .llambda=llambda, .nsimEIM=nsimEIM,
- .ephi=ephi, .el1=el1, .el2=el2 ))),
- loglikelihood = eval(substitute(
- function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- f1 = dexp(y, rate=lambda1)
- f2 = dexp(y, rate=lambda2)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * log(phi*f1 + (1-phi)*f2))
- }, list(.lphi = lphi, .llambda = llambda,
- .ephi = ephi, .el1 = el1, .el2 = el2 ))),
- vfamily = c("mix2exp"),
- deriv = eval(substitute(expression({
- phi = eta2theta(eta[,1], link = .lphi, earg = .ephi)
- lambda1 = eta2theta(eta[,2], link = .llambda, earg = .el1)
- lambda2 = eta2theta(eta[,3], link = .llambda, earg = .el2)
- dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
- dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
- dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
- f1 = dexp(x=y, rate=lambda1)
- f2 = dexp(x=y, rate=lambda2)
- pdf = phi*f1 + (1-phi)*f2
- df1.dlambda1 = exp(-lambda1*y) - y * dexp(y, rate=lambda1)
- df2.dlambda2 = exp(-lambda2*y) - y * dexp(y, rate=lambda2)
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1-phi) * df2.dlambda2 / pdf
- c(w) * cbind(dl.dphi * dphi.deta,
- dl.dlambda1 * dlambda1.deta,
- dl.dlambda2 * dlambda2.deta)
- }), list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2 ))),
- weight = eval(substitute(expression({
- run.mean = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = ifelse(runif(n) < phi, rexp(n,lambda1), rexp(n,lambda2))
- f1 = dexp(x=ysim, rate=lambda1)
- f2 = dexp(x=ysim, rate=lambda2)
- pdf = phi*f1 + (1-phi)*f2
- df1.dlambda1 = exp(-lambda1*ysim) - ysim * dexp(ysim, rate=lambda1)
- df2.dlambda2 = exp(-lambda2*ysim) - ysim * dexp(ysim, rate=lambda2)
- dl.dphi = (f1-f2) / pdf
- dl.dlambda1 = phi * df1.dlambda1 / pdf
- dl.dlambda2 = (1-phi) * df2.dlambda2 / pdf
- d2f1.dlambda12 = ysim*(ysim*lambda1-2)*exp(-lambda1*ysim)
- d2f2.dlambda22 = ysim*(ysim*lambda2-2)*exp(-lambda2*ysim)
- d2l.dphi2 = dl.dphi^2
- d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
- d2f1.dlambda12) / pdf
- d2l.dlambda22 = (1-phi) * ((1-phi) * df2.dlambda2^2 / pdf -
- d2f2.dlambda22) / pdf
- d2l.dlambda1lambda2 = phi * (1-phi) *
- df1.dlambda1 * df2.dlambda2 / pdf^2
- d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
- d2l.dphilambda2 = df2.dlambda2 * ((1-phi)*(f1-f2)/pdf - 1) / pdf
- rm(ysim)
- temp3 = matrix(0, n, dimm(M))
- temp3[,iam(1,1,M=3)] = d2l.dphi2
- temp3[,iam(2,2,M=3)] = d2l.dlambda12
- temp3[,iam(3,3,M=3)] = d2l.dlambda22
- temp3[,iam(1,2,M=3)] = d2l.dphilambda1
- temp3[,iam(1,3,M=3)] = d2l.dphilambda2
- temp3[,iam(2,3,M=3)] = d2l.dlambda1lambda2
- run.mean = ((ii-1) * run.mean + temp3) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else run.mean
-
- dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
- index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
- c(w) * wz
- }), list(.lphi=lphi, .llambda=llambda,
- .ephi=ephi, .el1=el1, .el2=el2, .nsimEIM=nsimEIM ))))
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+ el1 <- el2 <- elambda
+
+
+ if (!is.Numeric(qmu, allowable.length = 2, positive = TRUE) ||
+ any(qmu >= 1))
+ stop("bad input for argument 'qmu'")
+ if (length(iphi) &&
+ (!is.Numeric(iphi, allowable.length = 1, positive = TRUE) ||
+ iphi >= 1))
+ stop("bad input for argument 'iphi'")
+ if (length(il1) && !is.Numeric(il1))
+ stop("bad input for argument 'il1'")
+ if (length(il2) && !is.Numeric(il2))
+ stop("bad input for argument 'il2'")
+
+
+
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("'nsimEIM' should be an integer greater than 10")
+
+
+ new("vglmff",
+ blurb = c("Mixture of two univariate exponentials\n\n",
+ "Links: ",
+ namesof("phi", lphi, earg = ephi, tag = FALSE), ", ",
+ namesof("lambda1", llambda, earg = el1 , tag = FALSE), ", ",
+ namesof("lambda2", llambda, earg = el2 , tag = FALSE), "\n",
+ "Mean: phi / lambda1 + (1 - phi) / lambda2\n"),
+
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+ predictors.names =
+ c(namesof("phi", .lphi , earg = .ephi , tag = FALSE),
+ namesof("lambda1", .llambda , earg = .el1 , tag = FALSE),
+ namesof("lambda2", .llambda , earg = .el2 , tag = FALSE))
+
+ if (!length(etastart)) {
+ qy = quantile(y, prob = .qmu)
+ init.phi = rep(if(length(.iphi)) .iphi else 0.5, length = n)
+ init.lambda1 = rep(if(length(.il1)) .il1 else 1/qy[1], length = n)
+ init.lambda2 = rep(if(length(.il2)) .il2 else 1/qy[2], length = n)
+ if (!length(etastart))
+ etastart = cbind(theta2eta(init.phi, .lphi, earg = .ephi),
+ theta2eta(init.lambda1, .llambda, earg = .el1),
+ theta2eta(init.lambda2, .llambda, earg = .el2))
+ }
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .iphi = iphi, .il1 = il1, .il2 = il2,
+ .qmu = qmu))),
+ linkinv = eval(substitute(function(eta, extra = NULL){
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+ phi / lambda1 + (1 - phi) / lambda2
+ }, list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ last = eval(substitute(expression({
+ misc$link =
+ c("phi" = .lphi, "lambda1" = .llambda, "lambda2" = .llambda)
+
+ misc$earg =
+ list("phi" = .ephi, "lambda1" = .el1, "lambda2" = .el2)
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
+ }), list(.lphi = lphi, .llambda = llambda, .nsimEIM = nsimEIM,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ loglikelihood = eval(substitute(
+ function(mu,y,w,residuals = FALSE,eta,extra = NULL) {
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+
+ f1 = dexp(y, rate=lambda1)
+ f2 = dexp(y, rate=lambda2)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w * log(phi*f1 + (1 - phi)*f2))
+ }, list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ vfamily = c("mix2exp"),
+ deriv = eval(substitute(expression({
+ phi = eta2theta(eta[, 1], link = .lphi, earg = .ephi)
+ lambda1 = eta2theta(eta[, 2], link = .llambda, earg = .el1)
+ lambda2 = eta2theta(eta[, 3], link = .llambda, earg = .el2)
+
+ dphi.deta = dtheta.deta(phi, link = .lphi, earg = .ephi)
+ dlambda1.deta = dtheta.deta(lambda1, link = .llambda, earg = .el1)
+ dlambda2.deta = dtheta.deta(lambda2, link = .llambda, earg = .el2)
+
+ f1 = dexp(x = y, rate=lambda1)
+ f2 = dexp(x = y, rate=lambda2)
+ pdf = phi*f1 + (1 - phi)*f2
+ df1.dlambda1 = exp(-lambda1*y) - y * dexp(y, rate=lambda1)
+ df2.dlambda2 = exp(-lambda2*y) - y * dexp(y, rate=lambda2)
+ dl.dphi = (f1-f2) / pdf
+ dl.dlambda1 = phi * df1.dlambda1 / pdf
+ dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+
+ c(w) * cbind(dl.dphi * dphi.deta,
+ dl.dlambda1 * dlambda1.deta,
+ dl.dlambda2 * dlambda2.deta)
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2 ))),
+ weight = eval(substitute(expression({
+ run.mean = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = ifelse(runif(n) < phi, rexp(n, lambda1),
+ rexp(n, lambda2))
+ f1 = dexp(x = ysim, rate=lambda1)
+ f2 = dexp(x = ysim, rate=lambda2)
+ pdf = phi*f1 + (1 - phi)*f2
+
+ df1.dlambda1 = exp(-lambda1*ysim) - ysim * dexp(ysim, rate=lambda1)
+ df2.dlambda2 = exp(-lambda2*ysim) - ysim * dexp(ysim, rate=lambda2)
+ dl.dphi = (f1-f2) / pdf
+ dl.dlambda1 = phi * df1.dlambda1 / pdf
+ dl.dlambda2 = (1 - phi) * df2.dlambda2 / pdf
+ d2f1.dlambda12 = ysim*(ysim*lambda1-2)*exp(-lambda1*ysim)
+ d2f2.dlambda22 = ysim*(ysim*lambda2-2)*exp(-lambda2*ysim)
+ d2l.dphi2 = dl.dphi^2
+ d2l.dlambda12 = phi * (phi * df1.dlambda1^2 / pdf -
+ d2f1.dlambda12) / pdf
+ d2l.dlambda22 = (1 - phi) * ((1 - phi) * df2.dlambda2^2 / pdf -
+ d2f2.dlambda22) / pdf
+ d2l.dlambda1lambda2 = phi * (1 - phi) *
+ df1.dlambda1 * df2.dlambda2 / pdf^2
+ d2l.dphilambda1 = df1.dlambda1 * (phi*(f1-f2)/pdf - 1) / pdf
+ d2l.dphilambda2 = df2.dlambda2 * ((1 - phi)*(f1-f2)/pdf - 1) / pdf
+ rm(ysim)
+
+ temp3 = matrix(0, n, dimm(M))
+ temp3[,iam(1, 1, M = 3)] = d2l.dphi2
+ temp3[,iam(2, 2, M = 3)] = d2l.dlambda12
+ temp3[,iam(3, 3, M = 3)] = d2l.dlambda22
+ temp3[,iam(1, 2, M = 3)] = d2l.dphilambda1
+ temp3[,iam(1, 3, M = 3)] = d2l.dphilambda2
+ temp3[,iam(2, 3, M = 3)] = d2l.dlambda1lambda2
+ run.mean = ((ii-1) * run.mean + temp3) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.mean), n, dimm(M), byrow = TRUE) else
+ run.mean
+
+ dtheta.detas = cbind(dphi.deta, dlambda1.deta, dlambda2.deta)
+ index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ wz = wz * dtheta.detas[, index0$row] *
+ dtheta.detas[, index0$col]
+ c(w) * wz
+ }), list(.lphi = lphi, .llambda = llambda,
+ .ephi = ephi, .el1 = el1, .el2 = el2,
+ .nsimEIM = nsimEIM ))))
}
+
+
+
diff --git a/R/family.nonlinear.R b/R/family.nonlinear.R
index fb33a5f..7cec868 100644
--- a/R/family.nonlinear.R
+++ b/R/family.nonlinear.R
@@ -24,19 +24,19 @@ vnonlinear.control <- function(save.weight = TRUE, ...)
subset_lohi <- function(xvec, yvec,
- prob.x = c(0.15, 0.85),
+ probs.x = c(0.15, 0.85),
type = c("median", "wtmean", "unwtmean"),
wtvec = rep(1, len = length(xvec))) {
- if (!is.Numeric(prob.x, allowable.length = 2))
- stop("argument 'prob.x' must be numeric and of length two")
+ if (!is.Numeric(probs.x, allowable.length = 2))
+ stop("argument 'probs.x' must be numeric and of length two")
- min.q <- quantile(xvec, probs = prob.x[1] )
- max.q <- quantile(xvec, probs = prob.x[2] )
+ min.q <- quantile(xvec, probs = probs.x[1] )
+ max.q <- quantile(xvec, probs = probs.x[2] )
if(mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
+ type <- as.character(substitute(type))
type <- match.arg(type, c("median", "wtmean", "unwtmean"))[1]
@@ -61,7 +61,7 @@ subset_lohi <- function(xvec, yvec,
if (x1bar >= x2bar)
stop("cannot find two distinct x values; try decreasing the first ",
- "value of argument 'prob.x' and increasing the second value")
+ "value of argument 'probs.x' and increasing the second value")
list(x1bar = x1bar,
y1bar = y1bar,
@@ -87,8 +87,7 @@ micmen.control <- function(save.weight = TRUE, ...)
oim = TRUE,
link1 = "identity", link2 = "identity",
firstDeriv = c("nsimEIM", "rpar"),
- earg1 = list(), earg2 = list(),
- prob.x = c(0.15, 0.85),
+ probs.x = c(0.15, 0.85),
nsimEIM = 500,
dispersion = 0, zero = NULL)
{
@@ -97,29 +96,33 @@ micmen.control <- function(save.weight = TRUE, ...)
firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("argument 'imethod' must be integer")
- if (!is.Numeric(prob.x, allowable.length = 2))
- stop("argument 'prob.x' must be numeric and of length two")
+ if (!is.Numeric(probs.x, allowable.length = 2))
+ stop("argument 'probs.x' must be numeric and of length two")
if (!is.logical(oim) || length(oim) != 1)
stop("argument 'oim' must be single logical")
- stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM==round(nsimEIM))
+ stopifnot(nsimEIM > 10, length(nsimEIM) == 1,
+ nsimEIM == round(nsimEIM))
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 3)
stop("'imethod' must be 1 or 2 or 3")
estimated.dispersion <- (dispersion == 0)
- if (mode(link1) != "character" && mode(link1) != "name")
- link1 <- as.character(substitute(link1))
- if (mode(link2) != "character" && mode(link2) != "name")
- link2 <- as.character(substitute(link2))
+ link1 <- as.list(substitute(link1))
+ earg1 <- link2list(link1)
+ link1 <- attr(earg1, "function.name")
+
+ link2 <- as.list(substitute(link2))
+ earg2 <- link2list(link2)
+ link2 <- attr(earg2, "function.name")
- if (!is.list(earg1)) earg1 = list()
- if (!is.list(earg2)) earg2 = list()
new("vglmff",
blurb = c("Michaelis-Menton regression model\n",
@@ -131,21 +134,28 @@ micmen.control <- function(save.weight = TRUE, ...)
"Variance: constant"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M = 2)
}), list( .zero = zero))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M <- if (is.matrix(y)) ncol(y) else 1
- if (residuals) {
- if (M > 1) NULL else (y - mu) * sqrt(w)
- } else {
- rss.vgam(y - mu, w, M = M)
- }
+ M <- if (is.matrix(y)) ncol(y) else 1
+ if (residuals) {
+ if (M > 1) NULL else (y - mu) * sqrt(w)
+ } else {
+ rss.vgam(y - mu, w, M = M)
+ }
},
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+
+
if (!length(Xm2))
stop("regressor not found")
@@ -156,20 +166,20 @@ micmen.control <- function(save.weight = TRUE, ...)
extra$Xm2 <- Xm2 # Needed for @linkinv
predictors.names <-
- c(namesof("theta1", .link1, earg = .earg1, tag = FALSE),
- namesof("theta2", .link2, earg = .earg2, tag = FALSE))
+ c(namesof("theta1", .link1 , earg = .earg1, tag = FALSE),
+ namesof("theta2", .link2 , earg = .earg2, tag = FALSE))
if (length(mustart) || length(coefstart))
stop("cannot handle 'mustart' or 'coefstart'")
if (!length(etastart)) {
if ( .imethod == 3 ) {
- index0 <- (1:n)[Xm2 <= quantile(Xm2, prob = .prob.x[2] )]
+ index0 <- (1:n)[Xm2 <= quantile(Xm2, prob = .probs.x[2] )]
init1 <- median(y[index0])
init2 <- median(init1 * Xm2 / y - Xm2)
}
if ( .imethod == 1 || .imethod == 2) {
- mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
type = ifelse( .imethod == 1, "median", "wtmean"),
wtvec = w)
@@ -188,26 +198,28 @@ micmen.control <- function(save.weight = TRUE, ...)
if (length( .init2 )) init2 <- .init2
etastart <- cbind(
- rep(theta2eta(init1, .link1, earg = .earg1), len = n),
- rep(theta2eta(init2, .link2, earg = .earg2), len = n))
+ rep(theta2eta(init1, .link1 , earg = .earg1 ), len = n),
+ rep(theta2eta(init2, .link2 , earg = .earg2 ), len = n))
} else {
stop("cannot handle 'etastart' or 'mustart'")
}
}), list( .init1 = init1, .link1 = link1, .earg1 = earg1,
.init2 = init2, .link2 = link2, .earg2 = earg2,
.imethod = imethod,
- .prob.x = prob.x ))),
+ .probs.x = probs.x ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+ theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
theta1 * extra$Xm2 / (theta2 + extra$Xm2)
}, list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2))),
last = eval(substitute(expression({
- misc$link <- c(theta1 = .link1, theta2 = .link2)
+ misc$link <- c(theta1 = .link1 , theta2 = .link2)
+
misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
+
misc$rpar <- rpar
fit$df.residual <- n - rank # Not nrow_X_vlm - rank
fit$df.total <- n # Not nrow_X_vlm
@@ -215,7 +227,7 @@ micmen.control <- function(save.weight = TRUE, ...)
extra$Xm2 <- NULL # Regressor is in control$regressor
dpar <- .dispersion
if (!dpar) {
- dpar <- sum(w * (y - mu)^2) / (n - ncol_X_vlm)
+ dpar <- sum(c(w) * (y - mu)^2) / (n - ncol_X_vlm)
}
misc$dispersion <- dpar
@@ -228,6 +240,7 @@ micmen.control <- function(save.weight = TRUE, ...)
misc$oim <- .oim
misc$rpar <- rpar
misc$orig.rpar <- .rpar
+ misc$multipleResponses <- FALSE
}), list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2,
.dispersion = dispersion,
@@ -242,10 +255,10 @@ micmen.control <- function(save.weight = TRUE, ...)
vfamily = c("micmen", "vnonlinear"),
deriv = eval(substitute(expression({
- theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
- dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
- dtheta.deta(theta2, .link2, earg = .earg2))
+ theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+ theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
+ dthetas.detas <- cbind(dtheta.deta(theta1, .link1 , earg = .earg1 ),
+ dtheta.deta(theta2, .link2 , earg = .earg2 ))
rpar <- if ( .firstDeriv == "rpar") {
if (iter > 1) {
@@ -273,15 +286,15 @@ micmen.control <- function(save.weight = TRUE, ...)
temp200809 <- dmus.dthetas * dthetas.detas
if (M > 1)
temp200809[, 2:M] <- temp200809[, 2:M] + sqrt(rpar)
- w * (y - mu) * temp200809
+ c(w) * (y - mu) * temp200809
} else {
- w * (y - mu) *
+ c(w) * (y - mu) *
cbind(dmus.dthetas[, 1] * dthetas.detas[, 1],
dmus.dthetas[, 2] * dthetas.detas[, 2] + sqrt(rpar))
}
} else {
temp20101111 <- dmus.dthetas * dthetas.detas
- w * (y - mu) * temp20101111
+ c(w) * (y - mu) * temp20101111
}
myderiv
@@ -346,8 +359,7 @@ micmen.control <- function(save.weight = TRUE, ...)
-skira.control <- function(save.weight = TRUE, ...)
-{
+skira.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
@@ -360,7 +372,7 @@ skira.control <- function(save.weight = TRUE, ...)
earg2 = list(),
imethod = 1,
oim = TRUE,
- prob.x = c(0.15, 0.85),
+ probs.x = c(0.15, 0.85),
smallno = 1.0e-3,
nsimEIM = 500,
firstDeriv = c("nsimEIM", "rpar"),
@@ -369,8 +381,8 @@ skira.control <- function(save.weight = TRUE, ...)
firstDeriv <- match.arg(firstDeriv, c("nsimEIM", "rpar"))[1]
- if (!is.Numeric(prob.x, allowable.length = 2))
- stop("argument 'prob.x' must be numeric and of length two")
+ if (!is.Numeric(probs.x, allowable.length = 2))
+ stop("argument 'probs.x' must be numeric and of length two")
estimated.dispersion <- dispersion == 0
if (mode(link1) != "character" && mode(link1) != "name")
@@ -378,16 +390,21 @@ skira.control <- function(save.weight = TRUE, ...)
if (mode(link2) != "character" && mode(link2) != "name")
link2 <- as.character(substitute(link2))
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE))
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
stop("argument 'imethod' must be integer")
+
if (imethod > 5)
stop("argument 'imethod' must be 1, 2, 3, 4 or 5")
+
if (!is.list(earg1))
earg1 = list()
if (!is.list(earg2))
earg2 = list()
- stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM == round(nsimEIM))
+ stopifnot(nsimEIM > 10, length(nsimEIM) == 1,
+ nsimEIM == round(nsimEIM))
+
new("vglmff",
blurb = c("Shinozaki-Kira regression model\n",
@@ -409,8 +426,16 @@ skira.control <- function(save.weight = TRUE, ...)
warning("20101105; need to fix a bug in the signs of initial vals")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
if (!length(Xm2)) stop("regressor not found")
if (ncol(as.matrix(Xm2)) != 1)
stop("regressor not found or is not a vector. ",
@@ -419,8 +444,8 @@ skira.control <- function(save.weight = TRUE, ...)
extra$Xm2 <- Xm2
predictors.names <-
- c(namesof("theta1", .link1, earg = .earg1, tag = FALSE),
- namesof("theta2", .link2, earg = .earg2, tag = FALSE))
+ c(namesof("theta1", .link1 , earg = .earg1, tag = FALSE),
+ namesof("theta2", .link2 , earg = .earg2, tag = FALSE))
if (length(mustart) || length(coefstart))
stop("cannot handle 'mustart' or 'coefstart'")
@@ -428,11 +453,11 @@ skira.control <- function(save.weight = TRUE, ...)
if (!length(etastart)) {
- min.q <- quantile(Xm2, probs = .prob.x[1] )
- max.q <- quantile(Xm2, probs = .prob.x[2] )
+ min.q <- quantile(Xm2, probs = .probs.x[1] )
+ max.q <- quantile(Xm2, probs = .probs.x[2] )
if ( .imethod == 3 || .imethod == 2 ) {
- mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
type = ifelse( .imethod == 2, "median", "wtmean"),
wtvec = w)
@@ -463,7 +488,7 @@ skira.control <- function(save.weight = TRUE, ...)
fitted(smooth.spline(Xm2, y, w = w, df = 2.0))
}
- mysubset <- subset_lohi(Xm2, y, prob.x = .prob.x,
+ mysubset <- subset_lohi(Xm2, y, probs.x = .probs.x,
type = "wtmean", wtvec = w)
@@ -493,40 +518,44 @@ skira.control <- function(save.weight = TRUE, ...)
if (length( .init1 )) init1 <- .init1
if (length( .init2 )) init2 <- .init2
etastart <- cbind(
- rep(theta2eta(init1, .link1, earg = .earg1), len = n),
- rep(theta2eta(init2, .link2, earg = .earg2), len = n))
+ rep(theta2eta(init1, .link1 , earg = .earg1 ), len = n),
+ rep(theta2eta(init2, .link2 , earg = .earg2 ), len = n))
} else {
stop("cannot handle 'etastart' or 'mustart'")
}
}), list( .init1 = init1, .link1 = link1, .earg1 = earg1,
.init2 = init2, .link2 = link2, .earg2 = earg2,
- .smallno = smallno, .prob.x = prob.x,
+ .smallno = smallno, .probs.x = probs.x,
.nsimEIM = nsimEIM,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
+ theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+ theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
1 / (theta1 + theta2 * extra$Xm2)
}, list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2 ))),
last = eval(substitute(expression({
- misc$link <- c(theta1 = .link1, theta2 = .link2)
- misc$earg <- list(theta1 = .earg1, theta2 = .earg2)
+ misc$link <- c(theta1 = .link1 , theta2 = .link2)
+
+ misc$earg <- list(theta1 = .earg1, theta2 = .earg2 )
+
misc$rpar <- rpar
misc$orig.rpar <- .rpar
fit$df.residual <- n - rank
fit$df.total <- n
dpar <- .dispersion
if (!dpar) {
- dpar <- sum(w * (y - mu)^2) / (n - ncol_X_vlm)
+ dpar <- sum(c(w) * (y - mu)^2) / (n - ncol_X_vlm)
}
misc$dispersion <- dpar
misc$default.dispersion <- 0
misc$estimated.dispersion <- .estimated.dispersion
+
misc$imethod <- .imethod
misc$nsimEIM <- .nsimEIM
misc$firstDeriv <- .firstDeriv
misc$oim <- .oim
+ misc$multipleResponses <- FALSE
}), list( .link1 = link1, .earg1 = earg1,
.link2 = link2, .earg2 = earg2,
.dispersion = dispersion, .rpar = rpar,
@@ -548,10 +577,10 @@ skira.control <- function(save.weight = TRUE, ...)
.rpar
}
- theta1 <- eta2theta(eta[, 1], .link1, earg = .earg1)
- theta2 <- eta2theta(eta[, 2], .link2, earg = .earg2)
- dthetas.detas <- cbind(dtheta.deta(theta1, .link1, earg = .earg1),
- dtheta.deta(theta2, .link2, earg = .earg2))
+ theta1 <- eta2theta(eta[, 1], .link1 , earg = .earg1 )
+ theta2 <- eta2theta(eta[, 2], .link2 , earg = .earg2 )
+ dthetas.detas <- cbind(dtheta.deta(theta1, .link1 , earg = .earg1 ),
+ dtheta.deta(theta2, .link2 , earg = .earg2 ))
dmus.dthetas <- if (FALSE) {
attr(eval(d3), "gradient")
@@ -563,9 +592,9 @@ skira.control <- function(save.weight = TRUE, ...)
myderiv <- if ( .firstDeriv == "nsimEIM") {
- w * (y - mu) * dmus.dthetas * dthetas.detas
+ c(w) * (y - mu) * dmus.dthetas * dthetas.detas
} else {
- w * (y - mu) *
+ c(w) * (y - mu) *
cbind(dmus.dthetas[, 1] * dthetas.detas[, 1],
dmus.dthetas[, 2] * dthetas.detas[, 2] + sqrt(rpar))
}
diff --git a/R/family.normal.R b/R/family.normal.R
index c5d2fd8..9c8a9a8 100644
--- a/R/family.normal.R
+++ b/R/family.normal.R
@@ -9,7 +9,7 @@
-VGAM.weights.function = function(w, M, n) {
+VGAM.weights.function <- function(w, M, n) {
ncolw = ncol(as.matrix(w))
@@ -37,121 +37,153 @@ VGAM.weights.function = function(w, M, n) {
- gaussianff = function(dispersion = 0, parallel = FALSE, zero = NULL)
+
+ gaussianff <- function(dispersion = 0, parallel = FALSE, zero = NULL)
{
if (!is.Numeric(dispersion, allowable.length = 1) ||
dispersion < 0)
stop("bad input for argument 'dispersion'")
- estimated.dispersion = dispersion == 0
+ estimated.dispersion <- dispersion == 0
+
new("vglmff",
blurb = c("Vector linear/additive model\n",
- "Links: identity for Y1,...,YM"),
+ "Links: identity for Y1,...,YM"),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .parallel = parallel, .zero = zero ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M = if (is.matrix(y)) ncol(y) else 1
- n = if (is.matrix(y)) nrow(y) else length(y)
- wz = VGAM.weights.function(w = w, M = M, n = n)
- if (residuals) {
- if (M > 1) {
- U <- vchol(wz, M = M, n = n)
- temp = mux22(U, y-mu, M = M, upper = TRUE, as.matrix = TRUE)
- dimnames(temp) = dimnames(y)
- temp
- } else (y-mu) * sqrt(wz)
- } else
- rss.vgam(y-mu, wz = wz, M = M)
- },
- initialize = eval(substitute(expression({
- if (is.R())
- assign("CQO.FastAlgorithm", TRUE, envir = VGAM::VGAMenv) else
- CQO.FastAlgorithm <<- TRUE
- if (any(function.name == c("cqo","cao")) &&
- (length( .zero ) || (is.logical( .parallel ) && .parallel )))
- stop("cannot handle non-default arguments for cqo() and cao()")
-
- M = if (is.matrix(y)) ncol(y) else 1
- dy = dimnames(y)
- predictors.names = if (!is.null(dy[[2]])) dy[[2]] else
- paste("Y", 1:M, sep = "")
- if (!length(etastart))
- etastart = 0 * y
- }), list( .parallel = parallel, .zero = zero ))),
- linkinv = function(eta, extra = NULL) eta,
- last = eval(substitute(expression({
- dy = dimnames(y)
- if (!is.null(dy[[2]]))
- dimnames(fit$fitted.values) = dy
- dpar = .dispersion
- if (!dpar) {
- wz = VGAM.weights.function(w = w, M = M, n = n)
- temp5 = rss.vgam(y-mu, wz = wz, M = M)
- dpar = temp5 / (length(y) -
- (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
- }
- misc$dispersion = dpar
- misc$default.dispersion = 0
- misc$estimated.dispersion = .estimated.dispersion
- misc$link = rep("identity", length = M)
- names(misc$link) = predictors.names
-
- if (is.R()) {
- if (exists("CQO.FastAlgorithm", envir = VGAM::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM::VGAMenv)
- } else {
- while (exists("CQO.FastAlgorithm"))
- remove("CQO.FastAlgorithm")
- }
- }), list( .dispersion = dispersion,
- .estimated.dispersion = estimated.dispersion ))),
- loglikelihood =
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M = if (is.matrix(y)) ncol(y) else 1
- n = if (is.matrix(y)) nrow(y) else length(y)
- wz = VGAM.weights.function(w = w, M = M, n = n)
- temp1 = rss.vgam(y-mu, wz = wz, M = M)
-
-
-
- if (M == 1 || ncol(wz) == M) {
- -0.5 * temp1 + 0.5 * sum(log(wz)) - n * (M / 2) * log(2*pi)
- } else {
- if (all(wz[1, ] == apply(wz, 2, min)) &&
- all(wz[1, ] == apply(wz, 2, max))) {
- onewz = m2adefault(wz[1, , drop = FALSE], M = M)
- onewz = onewz[,,1] # M x M
-
- logdet <- sum(log(eigen(onewz, symmetric = TRUE,
- only.values = TRUE)$values))
- logretval <- -0.5 * temp1 + 0.5 * n * logdet -
- n * (M / 2) * log(2*pi)
- logretval
- } else {
- logretval = -0.5 * temp1 - n * (M / 2) * log(2*pi)
- for (ii in 1:n) {
- onewz = m2adefault(wz[ii, , drop = FALSE], M = M)
- onewz = onewz[,,1] # M x M
- logdet <- sum(log(eigen(onewz, symmetric = TRUE,
- only.values = TRUE)$values))
- logretval = logretval + 0.5 * logdet
- }
- logretval
- }
+ M <- if (is.matrix(y)) ncol(y) else 1
+ n <- if (is.matrix(y)) nrow(y) else length(y)
+ wz <- VGAM.weights.function(w = w, M = M, n = n)
+ if (residuals) {
+ if (M > 1) {
+ U <- vchol(wz, M = M, n = n)
+ temp <- mux22(U, y-mu, M = M, upper = TRUE, as.matrix = TRUE)
+ dimnames(temp) <- dimnames(y)
+ temp
+ } else (y-mu) * sqrt(wz)
+ } else {
+ rss.vgam(y-mu, wz = wz, M = M)
+ }
+ },
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero)
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+ if (is.R())
+ assign("CQO.FastAlgorithm", TRUE, envir = VGAM::VGAMenv) else
+ CQO.FastAlgorithm <<- TRUE
+ if (any(function.name == c("cqo", "cao")) &&
+ (length( .zero ) ||
+ (is.logical( .parallel ) && .parallel )))
+ stop("cannot handle non-default arguments for cqo() and cao()")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ M = if (is.matrix(y)) ncol(y) else 1
+ dy = dimnames(y)
+
+ predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
+ paste("Y", 1:M, sep = "")
+
+ if (!length(etastart))
+ etastart = 0 * y
+ }), list( .parallel = parallel, .zero = zero ))),
+ linkinv = function(eta, extra = NULL) eta,
+ last = eval(substitute(expression({
+ dy = dimnames(y)
+ if (!is.null(dy[[2]]))
+ dimnames(fit$fitted.values) = dy
+ dpar = .dispersion
+ if (!dpar) {
+ wz = VGAM.weights.function(w = w, M = M, n = n)
+ temp5 = rss.vgam(y-mu, wz = wz, M = M)
+ dpar = temp5 / (length(y) -
+ (if(is.numeric(ncol(X_vlm_save))) ncol(X_vlm_save) else 0))
+ }
+ misc$dispersion = dpar
+ misc$default.dispersion = 0
+ misc$estimated.dispersion = .estimated.dispersion
+
+ misc$link = rep("identity", length = M)
+ names(misc$link) = predictors.names
+ misc$earg = vector("list", M)
+ for (ilocal in 1:M)
+ misc$earg[[ilocal]] <- list()
+ names(misc$link) = predictors.names
+
+
+ if (is.R()) {
+ if (exists("CQO.FastAlgorithm", envir = VGAM::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM::VGAMenv)
+ } else {
+ while (exists("CQO.FastAlgorithm"))
+ remove("CQO.FastAlgorithm")
+ }
+
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .dispersion = dispersion,
+ .estimated.dispersion = estimated.dispersion ))),
+ loglikelihood =
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M = if (is.matrix(y)) ncol(y) else 1
+ n = if (is.matrix(y)) nrow(y) else length(y)
+ wz = VGAM.weights.function(w = w, M = M, n = n)
+ temp1 = rss.vgam(y-mu, wz = wz, M = M)
+
+
+
+ if (M == 1 || ncol(wz) == M) {
+ -0.5 * temp1 + 0.5 * sum(log(wz)) - n * (M / 2) * log(2*pi)
+ } else {
+ if (all(wz[1, ] == apply(wz, 2, min)) &&
+ all(wz[1, ] == apply(wz, 2, max))) {
+ onewz = m2adefault(wz[1, , drop = FALSE], M = M)
+ onewz = onewz[, ,1] # M x M
+
+ logdet <- sum(log(eigen(onewz, symmetric = TRUE,
+ only.values = TRUE)$values))
+ logretval <- -0.5 * temp1 + 0.5 * n * logdet -
+ n * (M / 2) * log(2*pi)
+ logretval
+ } else {
+ logretval = -0.5 * temp1 - n * (M / 2) * log(2*pi)
+ for (ii in 1:n) {
+ onewz = m2adefault(wz[ii, , drop = FALSE], M = M)
+ onewz = onewz[, ,1] # M x M
+ logdet <- sum(log(eigen(onewz, symmetric = TRUE,
+ only.values = TRUE)$values))
+ logretval = logretval + 0.5 * logdet
}
- },
- linkfun = function(mu, extra = NULL) mu,
- vfamily = "gaussianff",
- deriv=expression({
- wz = VGAM.weights.function(w = w, M = M, n = n)
- mux22(cc=t(wz), xmat=y-mu, M = M, as.matrix = TRUE)
- }),
- weight= expression({
- wz
- }))
+ logretval
+ }
+ }
+ },
+ linkfun = function(mu, extra = NULL) mu,
+ vfamily = "gaussianff",
+ deriv = expression({
+ wz = VGAM.weights.function(w = w, M = M, n = n)
+ mux22(cc = t(wz), xmat = y-mu, M = M, as.matrix = TRUE)
+ }),
+ weight = expression({
+ wz
+ }))
}
@@ -163,11 +195,12 @@ VGAM.weights.function = function(w, M, n) {
-dposnorm = function(x, mean = 0, sd = 1, log = FALSE) {
- log.arg = log
- rm(log)
- if (!is.logical(log.arg) || length(log.arg) != 1)
+dposnorm <- function(x, mean = 0, sd = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
+ rm(log)
+
+
L = max(length(x), length(mean), length(sd))
x = rep(x, len = L);
mean = rep(mean, len = L);
@@ -182,7 +215,7 @@ dposnorm = function(x, mean = 0, sd = 1, log = FALSE) {
}
-pposnorm = function(q, mean = 0, sd = 1) {
+pposnorm <- function(q, mean = 0, sd = 1) {
L = max(length(q), length(mean), length(sd))
q = rep(q, len = L);
mean = rep(mean, len = L);
@@ -192,7 +225,7 @@ pposnorm = function(q, mean = 0, sd = 1) {
}
-qposnorm = function(p, mean = 0, sd = 1) {
+qposnorm <- function(p, mean = 0, sd = 1) {
if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
qnorm(p = p + (1-p) * pnorm(0, mean = mean, sd = sd),
@@ -200,7 +233,7 @@ qposnorm = function(p, mean = 0, sd = 1) {
}
-rposnorm = function(n, mean = 0, sd = 1) {
+rposnorm <- function(n, mean = 0, sd = 1) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
mean = rep(mean, length = n)
@@ -218,17 +251,22 @@ rposnorm = function(n, mean = 0, sd = 1) {
- posnormal1 = function(lmean = "identity", lsd = "loge",
- emean = list(), esd = list(),
- imean = NULL, isd = NULL,
- nsimEIM = 100, zero = NULL)
+ posnormal1 <- function(lmean = "identity", lsd = "loge",
+ imean = NULL, isd = NULL,
+ nsimEIM = 100, zero = NULL)
{
warning("this VGAM family function is not working properly yet")
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
+
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -237,8 +275,6 @@ rposnorm = function(n, mean = 0, sd = 1) {
!is.Numeric(isd, positive = TRUE))
stop("bad input for argument 'isd'")
- if (!is.list(emean)) emean = list()
- if (!is.list(esd)) esd = list()
if (length(nsimEIM))
if (!is.Numeric(nsimEIM, allowable.length = 1,
@@ -251,118 +287,144 @@ rposnorm = function(n, mean = 0, sd = 1) {
blurb = c("Positive (univariate) normal distribution\n\n",
"Links: ",
namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
- namesof("sd", lsd, earg = esd, tag = TRUE)),
+ namesof("sd", lsd, earg = esd, tag = TRUE)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ par.names = c("mean", "sd"),
+ zero = .zero )
+ }, list( .zero = zero
+ ))),
+
+
+
+
initialize = eval(substitute(expression({
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (min(y) <= 0)
- stop("response must be positive")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
- predictors.names =
+
+ predictors.names <-
c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
namesof("sd", .lsd, earg = .esd, tag = FALSE))
if (!length(etastart)) {
- init.me = if (length( .imean)) rep( .imean, len = n) else NULL
- init.sd = if (length( .isd )) rep( .isd , len = n) else NULL
+ init.me = if (length( .i.mean)) rep( .i.mean, len = n) else NULL
+ init.sd = if (length( .i.sd )) rep( .i.sd , len = n) else NULL
if (!length(init.me))
init.me = rep(quantile(y, probs=0.40), len = n)
if (!length(init.sd))
- init.sd = rep(sd(c(y)) * 1.2, len = n)
- etastart = cbind(theta2eta(init.me, .lmean, earg = .emean),
- theta2eta(init.sd, .lsd, earg = .esd))
- }
- }), list( .lmean = lmean, .lsd = lsd, .imean = imean, .isd = isd,
- .emean = emean, .esd = esd ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
- mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
- }, list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
- last = eval(substitute(expression({
- misc$link = c("mean"= .lmean, "sd"= .lsd)
- misc$earg = list("mean"= .emean, "sd"= .esd )
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
- .nsimEIM = nsimEIM ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
-
- sum(w * dposnorm(x=y, m=mymu, sd = mysd, log = TRUE))
- }
- }, list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
- vfamily=c("posnormal1"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
- zedd = (y-mymu) / mysd
- temp7 = dnorm(-mymu/mysd)
- temp8 = pnorm(mymu/mysd) * mysd
- dl.dmu = zedd / mysd^2 - temp7 / temp8
- dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
- dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd)
- dthetas.detas = cbind(dmu.deta, dsd.deta)
- w * dthetas.detas * cbind(dl.dmu, dl.dsd)
- }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd ))),
- weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- if (length( .nsimEIM )) {
- for(ii in 1:( .nsimEIM )) {
- ysim <- rposnorm(n, m=mymu, sd = mysd)
- zedd = (ysim-mymu) / mysd
- temp7 = dnorm(-mymu/mysd)
- temp8 = pnorm(mymu/mysd) * mysd
- dl.dmu = zedd / mysd^2 - temp7 / temp8
- dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
-
- rm(ysim)
- temp3 = matrix(c(dl.dmu, dl.dsd), n, 2)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
-
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz = w * matrix(wz, n, dimm(M))
- } else {
- wz = matrix(as.numeric(NA), n, dimm(M))
- ed2l.dmu2 = (1 - temp7*mymu/temp8) / mysd^2 - (temp7/temp8)^2
- ed2l.dmusd = (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
- mymu*temp7 / temp8)
- ed2l.dsd2 = 2 / mysd^2 - (temp7 * mymu /(mysd^2 * temp8)) *
- (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
- wz[,iam(1,1,M)] = ed2l.dmu2 * dmu.deta^2
- wz[,iam(2,2,M)] = ed2l.dsd2 * dsd.deta^2
- wz[,iam(1,2,M)] = ed2l.dmusd * dsd.deta * dmu.deta
- wz = c(w) * wz
- }
- wz
- }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
- .nsimEIM = nsimEIM ))))
+ init.sd = rep(sd(c(y)) * 1.2, len = n)
+ etastart = cbind(theta2eta(init.me, .lmean, earg = .emean),
+ theta2eta(init.sd, .lsd, earg = .esd ))
+ }
+ }), list( .lmean = lmean, .lsd = lsd,
+ .i.mean = imean, .i.sd = isd,
+ .emean = emean, .esd = esd
+ ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean )
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ mymu + mysd * dnorm(-mymu/mysd) / pnorm(mymu/mysd)
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd
+ ))),
+ last = eval(substitute(expression({
+ misc$link = c("mean" = .lmean , "sd" = .lsd )
+ misc$earg = list("mean" = .emean , "sd" = .esd )
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+
+ sum(c(w) * dposnorm(x=y, m=mymu, sd = mysd, log = TRUE))
+ }
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd ))),
+ vfamily = c("posnormal1"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+
+ zedd = (y-mymu) / mysd
+ temp7 = dnorm(-mymu/mysd)
+ temp8 = pnorm(mymu/mysd) * mysd
+
+ dl.dmu = zedd / mysd^2 - temp7 / temp8
+ dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
+
+ dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
+ dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd )
+ dthetas.detas = cbind(dmu.deta, dsd.deta)
+ c(w) * dthetas.detas * cbind(dl.dmu, dl.dsd)
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ if (length( .nsimEIM )) {
+ for(ii in 1:( .nsimEIM )) {
+ ysim <- rposnorm(n, m=mymu, sd = mysd)
+ zedd = (ysim-mymu) / mysd
+ temp7 = dnorm(-mymu/mysd)
+ temp8 = pnorm(mymu/mysd) * mysd
+ dl.dmu = zedd / mysd^2 - temp7 / temp8
+ dl.dsd = (mymu*temp7/temp8 + zedd^3 / mysd - 1) / mysd
+
+ rm(ysim)
+ temp3 = matrix(c(dl.dmu, dl.dsd), n, 2)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[, ind1$row.index]*temp3[, ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
+
+ wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
+ wz = c(w) * matrix(wz, n, dimm(M))
+ } else {
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ ed2l.dmu2 = (1 - temp7*mymu/temp8) / mysd^2 - (temp7/temp8)^2
+ ed2l.dmusd = (temp7 /(mysd * temp8)) * (1 + (mymu/mysd)^2 +
+ mymu*temp7 / temp8)
+ ed2l.dsd2 = 2 / mysd^2 - (temp7 * mymu /(mysd^2 * temp8)) *
+ (1 + (mymu/mysd)^2 + mymu*temp7/temp8)
+ wz[, iam(1, 1, M)] = ed2l.dmu2 * dmu.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dsd2 * dsd.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dmusd * dsd.deta * dmu.deta
+ wz = c(w) * wz
+ }
+ wz
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .nsimEIM = nsimEIM ))))
}
-dbetanorm = function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
- log.arg = log
- rm(log)
- if (!is.logical(log.arg) ||
- length(log.arg) != 1)
+dbetanorm <- function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
+ rm(log)
+
ans =
if (log.arg) {
@@ -384,7 +446,7 @@ dbetanorm = function(x, shape1, shape2, mean = 0, sd = 1, log = FALSE) {
-pbetanorm = function(q, shape1, shape2, mean = 0, sd = 1,
+pbetanorm <- function(q, shape1, shape2, mean = 0, sd = 1,
lower.tail = TRUE, log.p = FALSE) {
pbeta(q=pnorm(q = q, mean = mean, sd = sd),
shape1=shape1, shape2=shape2,
@@ -392,7 +454,7 @@ pbetanorm = function(q, shape1, shape2, mean = 0, sd = 1,
}
-qbetanorm = function(p, shape1, shape2, mean = 0, sd = 1) {
+qbetanorm <- function(p, shape1, shape2, mean = 0, sd = 1) {
if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
qnorm(p = qbeta(p = p, shape1 = shape1, shape2 = shape2),
@@ -400,7 +462,7 @@ qbetanorm = function(p, shape1, shape2, mean = 0, sd = 1) {
}
-rbetanorm = function(n, shape1, shape2, mean = 0, sd = 1) {
+rbetanorm <- function(n, shape1, shape2, mean = 0, sd = 1) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
qnorm(p = qbeta(p = runif(n), shape1 = shape1, shape2 = shape2),
@@ -410,11 +472,12 @@ rbetanorm = function(n, shape1, shape2, mean = 0, sd = 1) {
-dtikuv = function(x, d, mean = 0, sigma = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dtikuv <- function(x, d, mean = 0, sigma = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
if (!is.Numeric(d, allowable.length = 1) ||
max(d) >= 2)
stop("bad input for argument 'd'")
@@ -434,7 +497,7 @@ dtikuv = function(x, d, mean = 0, sigma = 1, log = FALSE) {
}
-ptikuv = function(q, d, mean = 0, sigma=1) {
+ptikuv <- function(q, d, mean = 0, sigma = 1) {
if (!is.Numeric(d, allowable.length = 1) ||
max(d) >= 2)
stop("bad input for argument 'd'")
@@ -460,7 +523,7 @@ ptikuv = function(q, d, mean = 0, sigma=1) {
}
-qtikuv = function(p, d, mean = 0, sigma = 1, ...) {
+qtikuv <- function(p, d, mean = 0, sigma = 1, ...) {
if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
@@ -475,7 +538,7 @@ qtikuv = function(p, d, mean = 0, sigma = 1, ...) {
sigma = rep(sigma, len = L);
ans = rep(0.0, len = L)
- myfun = function(x, d, mean = 0, sigma = 1, p)
+ myfun <- function(x, d, mean = 0, sigma = 1, p)
ptikuv(q = x, d = d, mean = mean, sigma = sigma) - p
for(i in 1:L) {
Lower = ifelse(p[i] <= 0.5, mean[i] - 3 * sigma[i], mean[i])
@@ -494,7 +557,7 @@ qtikuv = function(p, d, mean = 0, sigma = 1, ...) {
}
-rtikuv = function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
+rtikuv <- function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE))
stop("bad input for argument 'n'")
if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
@@ -539,14 +602,21 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
- tikuv = function(d, lmean = "identity", lsigma = "loge",
- emean = list(), esigma = list(),
+ tikuv <- function(d, lmean = "identity", lsigma = "loge",
isigma = NULL, zero = 2)
{
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
+
+
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ lsigma <- as.list(substitute(lsigma))
+ e.sigma <- link2list(lsigma)
+ l.sigma <- attr(e.sigma, "function.name")
+
+
+
if (length(zero) &&
(!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
max(zero) > 2))
@@ -554,30 +624,37 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
if (!is.Numeric(d, allowable.length = 1) || max(d) >= 2)
stop("bad input for argument 'd'")
- if (!is.list(emean)) emean = list()
- if (!is.list(esigma)) esigma = list()
new("vglmff",
blurb = c("Short-tailed symmetric [Tiku and Vaughan (1999)] ",
"distribution\n",
"Link: ",
- namesof("mean", lmean, earg = emean), ", ",
- namesof("sigma", lsigma, earg = esigma),
- "\n",
- "\n",
+ namesof("mean", lmean, earg = emean), ", ",
+ namesof("sigma", l.sigma, earg = e.sigma),
+ "\n", "\n",
"Mean: mean"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero)
+ }, list( .zero = zero ))),
+
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("the response must be a vector or one-column matrix")
- predictors.names =
- c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
- namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <-
+ c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ namesof("sigma", .l.sigma, earg = .e.sigma, tag = FALSE))
+
+
if (!length(etastart)) {
- sigma.init = if (length(.isigma)) rep(.isigma, length = n) else {
+ sigma.init = if (length(.i.sigma)) rep(.i.sigma, length = n) else {
hh = 2 - .d
KK = 1 / (1 + 1/hh + 0.75/hh^2)
K2 = 1 + 3/hh + 15/(4*hh^2)
@@ -585,64 +662,71 @@ rtikuv = function(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6) {
}
mean.init = rep(weighted.mean(y, w), len = n)
etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ theta2eta(sigma.init, .l.sigma, earg = .e.sigma))
}
- }),list( .lmean = lmean, .lsigma=lsigma, .isigma=isigma, .d = d,
- .emean = emean, .esigma=esigma ))),
+ }),list( .lmean = lmean, .l.sigma = l.sigma,
+ .i.sigma = isigma, .d = d,
+ .emean = emean, .e.sigma = e.sigma ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .lmean, earg = .emean)
+ eta2theta(eta[, 1], .lmean, earg = .emean)
}, list( .lmean = lmean,
- .emean = emean, .esigma=esigma ))),
- last = eval(substitute(expression({
- misc$link = c("mean"= .lmean, "sigma"= .lsigma)
- misc$earg = list("mean"= .emean, "sigma"= .esigma )
- misc$expected = TRUE
- misc$d = .d
- }), list( .lmean = lmean, .lsigma=lsigma, .d = d,
- .emean = emean, .esigma=esigma ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dtikuv(x=y, d = .d , mean = mymu,
- sigma = sigma, log = TRUE))
- }
- }, list( .lmean = lmean, .lsigma = lsigma, .d = d,
- .emean = emean, .esigma = esigma ))),
- vfamily=c("tikuv"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- sigma = eta2theta(eta[,2], .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
- zedd = (y - mymu) / sigma
- hh = 2 - .d
- gzedd = zedd / (1 + 0.5*zedd^2 / hh)
- dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
- dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
- c(w) * cbind(dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta)
- }), list( .lmean = lmean, .lsigma=lsigma, .d = d,
- .emean = emean, .esigma=esigma ))),
- weight = eval(substitute(expression({
- ayy = 1 / (2*hh)
- Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
- Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
- ed2l.dmymu2 = Dnos / sigma^2
- ed2l.dnu2 = Dstar / sigma^2
- wz = matrix(as.numeric(NA), n, M) # diagonal matrix
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmu.deta^2
- wz[,iam(2,2,M)] = ed2l.dnu2 * dsigma.deta^2
- c(w) * wz
- }), list( .lmean = lmean, .lsigma=lsigma,
- .emean = emean, .esigma=esigma ))))
+ .emean = emean, .e.sigma = e.sigma ))),
+ last = eval(substitute(expression({
+ misc$link = c("mean"= .lmean , "sigma"= .l.sigma )
+ misc$earg = list("mean"= .emean , "sigma"= .e.sigma )
+ misc$expected = TRUE
+ misc$d = .d
+ }), list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
+ .emean = emean, .e.sigma = e.sigma ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ sigma = eta2theta(eta[, 2], .l.sigma, earg = .e.sigma)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dtikuv(x=y, d = .d , mean = mymu,
+ sigma = sigma, log = TRUE))
+ }
+ }, list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
+ .emean = emean, .e.sigma = e.sigma ))),
+ vfamily = c("tikuv"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ sigma = eta2theta(eta[, 2], .l.sigma, earg = .e.sigma)
+
+ dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
+ dsigma.deta = dtheta.deta(sigma, .l.sigma, earg = .e.sigma)
+
+ zedd = (y - mymu) / sigma
+ hh = 2 - .d
+ gzedd = zedd / (1 + 0.5*zedd^2 / hh)
+
+ dl.dmu = zedd / sigma - 2 * gzedd / (hh*sigma)
+ dl.dsigma = (zedd^2 - 1 - 2 * zedd * gzedd / hh) / sigma
+
+ c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
+ }), list( .lmean = lmean, .l.sigma = l.sigma, .d = d,
+ .emean = emean, .e.sigma = e.sigma ))),
+ weight = eval(substitute(expression({
+ ayy = 1 / (2*hh)
+ Dnos = 1 - (2/hh) * (1 - ayy) / (1 + 2*ayy + 3*ayy^2)
+ Dstar = -1 + 3 * (1 + 2*ayy + 11*ayy^2) / (1 + 2*ayy + 3*ayy^2)
+
+ ned2l.dmymu2 = Dnos / sigma^2
+ ned2l.dnu2 = Dstar / sigma^2
+
+ wz = matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[, iam(1, 1, M)] = ned2l.dmymu2 * dmu.deta^2
+ wz[, iam(2, 2, M)] = ned2l.dnu2 * dsigma.deta^2
+ c(w) * wz
+ }), list( .lmean = lmean, .l.sigma = l.sigma,
+ .emean = emean, .e.sigma = e.sigma ))))
}
-dfnorm = function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
+dfnorm <- function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
if (!is.Numeric(a1, positive = TRUE) ||
!is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
@@ -656,7 +740,7 @@ dfnorm = function(x, mean = 0, sd = 1, a1 = 1, a2=1) {
}
-pfnorm = function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
+pfnorm <- function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
if (!is.Numeric(a1, positive = TRUE) ||
!is.Numeric(a2, positive = TRUE))
stop("bad input for arguments 'a1' and 'a2'")
@@ -673,7 +757,7 @@ pfnorm = function(q, mean = 0, sd = 1, a1 = 1, a2=1) {
}
-qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
+qfnorm <- function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
if (!is.Numeric(a1, positive = TRUE) ||
@@ -690,7 +774,7 @@ qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
a2 = rep(a2, len = L);
ans = rep(0.0, len = L)
- myfun = function(x, mean = 0, sd = 1, a1 = 1, a2=2, p)
+ myfun <- function(x, mean = 0, sd = 1, a1 = 1, a2=2, p)
pfnorm(q = x, mean = mean, sd = sd, a1 = a1, a2 = a2) - p
for(i in 1:L) {
mytheta = mean[i]/sd[i]
@@ -710,7 +794,7 @@ qfnorm = function(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...) {
}
-rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
+rfnorm <- function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
if (!is.Numeric(a1, positive = TRUE) ||
@@ -725,57 +809,75 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
- fnormal1 = function(lmean = "identity", lsd = "loge",
- emean = list(), esd = list(),
+ fnormal1 <- function(lmean = "identity", lsd = "loge",
imean = NULL, isd = NULL,
a1 = 1, a2 = 1,
nsimEIM = 500, imethod = 1, zero = NULL)
{
- if (!is.Numeric(a1, positive = TRUE, allowable.length = 1) ||
- !is.Numeric(a2, positive = TRUE, allowable.length = 1))
- stop("bad input for arguments 'a1' and 'a2'")
- if (any(a1 <= 0 | a2 <= 0))
- stop("arguments 'a1' and 'a2' must each be a positive value")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean = as.character(substitute(lmean))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ if (!is.Numeric(a1, positive = TRUE, allowable.length = 1) ||
+ !is.Numeric(a2, positive = TRUE, allowable.length = 1))
+ stop("bad input for arguments 'a1' and 'a2'")
+ if (any(a1 <= 0 | a2 <= 0))
+ stop("arguments 'a1' and 'a2' must each be a positive value")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- if (!is.list(emean)) emean = list()
- if (!is.list(esd)) esd = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 10)
- stop("argument 'nsimEIM' should be an integer greater than 10")
- if (length(imean) && !is.Numeric(imean))
- stop("bad input for 'imean'")
- if (length(isd) && !is.Numeric(isd, positive = TRUE))
- stop("bad input for 'isd'")
- new("vglmff",
- blurb = c("(Generalized) folded univariate normal distribution\n\n",
- "Link: ",
- namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
- namesof("sd", lsd, earg = esd, tag = TRUE)),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
- namesof("sd", .lsd, earg = .esd, tag = FALSE))
- if ((ncol(y <- cbind(y)) != 1) || any(y <= 0))
- stop("response must be a vector or a one-column ",
- "matrix with positive values")
- if (!length(etastart)) {
- junk = if (is.R()) lm.wfit(x = x, y=y, w = w) else
- lm.wfit(x = x, y=y, w = w, method = "qr")
+
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("argument 'nsimEIM' should be an integer greater than 10")
+ if (length(imean) && !is.Numeric(imean))
+ stop("bad input for 'imean'")
+
+ if (length(isd) && !is.Numeric(isd, positive = TRUE))
+ stop("bad input for 'isd'")
+
+
+ new("vglmff",
+ blurb = c("(Generalized) folded univariate normal distribution\n\n",
+ "Link: ",
+ namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
+ namesof("sd", lsd, earg = esd, tag = TRUE)),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ c(namesof("mean", .lmean, earg = .emean, tag = FALSE),
+ namesof("sd", .lsd, earg = .esd, tag = FALSE))
+
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y=y, w = w)
if (FALSE) {
@@ -784,68 +886,73 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
"with integer values")
m1d = meany = weighted.mean(y, w)
m2d = weighted.mean(y^2, w)
- stddev = sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ stddev = sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
Ahat = m1d^2 / m2d
thetahat = sqrt(max(1/Ahat -1, 0.1))
- mean.init = rep(if(length( .imean)) .imean else
+ mean.init = rep(if(length( .i.mean)) .i.mean else
thetahat * sqrt((stddev^2 + meany^2) * Ahat), len = n)
- sd.init = rep(if(length( .isd)) .isd else
+ sd.init = rep(if(length( .i.sd)) .i.sd else
sqrt((stddev^2 + meany^2) * Ahat), len = n)
}
- stddev = sqrt( sum(w * junk$resid^2) / junk$df.residual )
- meany = weighted.mean(y, w)
- mean.init = rep(if(length( .imean)) .imean else
- {if( .imethod == 1) median(y) else meany}, len = n)
- sd.init = rep(if(length( .isd)) .isd else
- {if( .imethod == 1) stddev else 1.2*sd(y)}, len = n)
- etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
- theta2eta(sd.init, .lsd, earg = .esd))
- }
- }), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
- .imean = imean, .isd = isd, .a1 = a1, .a2 = a2,
- .imethod = imethod ))),
+ stddev = sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
+ meany = weighted.mean(y, w)
+ mean.init = rep(if(length( .i.mean)) .i.mean else
+ {if( .imethod == 1) median(y) else meany}, len = n)
+ sd.init = rep(if(length( .i.sd)) .i.sd else
+ {if( .imethod == 1) stddev else 1.2*sd(c(y))}, len = n)
+ etastart = cbind(theta2eta(mean.init, .lmean, earg = .emean),
+ theta2eta(sd.init, .lsd, earg = .esd ))
+ }
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .i.mean = imean, .i.sd = isd,
+ .a1 = a1, .a2 = a2, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
mytheta = mymu/mysd
mysd * (( .a1+ .a2) * (mytheta * pnorm(mytheta) +
dnorm(mytheta)) - .a2 * mytheta)
- }, list( .lmean = lmean, .lsd = lsd,
- .emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
- last = eval(substitute(expression({
- misc$link = c("mu"= .lmean, "sd"= .lsd)
- misc$earg = list("mu"= .emean, "sd"= .esd)
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$simEIM = TRUE
- misc$imethod = .imethod
- misc$a1 = .a1
- misc$a2 = .a2
- }), list( .lmean = lmean, .lsd = lsd,
- .emean = emean, .esd = esd,
- .imethod = imethod, .nsimEIM = nsimEIM,
- .a1 = a1, .a2 = a2 ))),
- loglikelihood=eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
- a1vec = .a1
- a2vec = .a2
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
+ }, list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .a1 = a1, .a2 = a2 ))),
+ last = eval(substitute(expression({
+ misc$link = c("mu" = .lmean , "sd" = .lsd )
+
+ misc$earg = list("mu" = .emean , "sd" = .esd )
+
+ misc$multipleResponses <- FALSE
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$simEIM = TRUE
+ misc$imethod = .imethod
+ misc$a1 = .a1
+ misc$a2 = .a2
+ }), list( .lmean = lmean, .lsd = lsd,
+ .emean = emean, .esd = esd,
+ .imethod = imethod, .nsimEIM = nsimEIM,
+ .a1 = a1, .a2 = a2 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
+ a1vec = .a1
+ a2vec = .a2
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w)*log(dnorm(x=y/(a1vec*mysd) - mymu/mysd)/(a1vec*mysd) +
dnorm(x=y/(a2vec*mysd) + mymu/mysd)/(a2vec*mysd)))
}
}, list( .lmean = lmean, .lsd = lsd,
.emean = emean, .esd = esd, .a1 = a1, .a2 = a2 ))),
- vfamily=c("fnormal1"),
+ vfamily = c("fnormal1"),
deriv = eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmean, earg = .emean)
- mysd = eta2theta(eta[,2], .lsd, earg = .esd)
+ mymu = eta2theta(eta[, 1], .lmean, earg = .emean)
+ mysd = eta2theta(eta[, 2], .lsd, earg = .esd )
dmu.deta = dtheta.deta(mymu, .lmean, earg = .emean)
- dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd)
+ dsd.deta = dtheta.deta(mysd, .lsd, earg = .esd )
a1vec = .a1
a2vec = .a2
d3 = deriv3(~ log((exp(-0.5*(y/(a1vec*mysd) - mymu/mysd)^2)/a1vec +
@@ -854,8 +961,8 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
name=c("mymu","mysd"), hessian= FALSE)
eval.d3 = eval(d3)
dl.dthetas = attr(eval.d3, "gradient") # == cbind(dl.dmu, dl.dsd)
- dtheta.detas = cbind(dmu.deta, dsd.deta)
- w * dtheta.detas * dl.dthetas
+ DTHETA.detas = cbind(dmu.deta, dsd.deta)
+ c(w) * DTHETA.detas * dl.dthetas
}), list( .lmean = lmean, .lsd = lsd, .emean = emean, .esd = esd,
.a1 = a1, .a2 = a2 ))),
weight = eval(substitute(expression({
@@ -875,7 +982,7 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
temp3 = matrix(0, n, dimm(M))
for(ss in 1:M)
for(tt in ss:M)
- temp3[,iam(ss,tt,M)] = -d2l.dthetas2[,ss,tt]
+ temp3[, iam(ss,tt, M)] = -d2l.dthetas2[, ss,tt]
run.mean = ((ii-1) * run.mean + temp3) / ii
}
@@ -885,7 +992,7 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
run.mean
index0 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- wz = wz * dtheta.detas[,index0$row] * dtheta.detas[,index0$col]
+ wz = wz * DTHETA.detas[, index0$row] * DTHETA.detas[, index0$col]
c(w) * wz
}), list( .nsimEIM = nsimEIM, .a1 = a1, .a2 = a2 ))))
}
@@ -894,28 +1001,38 @@ rfnorm = function(n, mean = 0, sd = 1, a1 = 1, a2=1) {
-lqnorm.control = function(trace = TRUE, ...)
-{
- list(trace=trace)
+lqnorm.control <- function(trace = TRUE, ...) {
+ list(trace = trace)
}
-lqnorm = function(qpower = 2, link = "identity", earg = list(),
- imethod = 1, imu = NULL, shrinkage.init = 0.95)
+
+
+
+lqnorm <- function(qpower = 2,
+ link = "identity",
+ imethod = 1, imu = NULL, shrinkage.init = 0.95)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) eerg = list()
- if (!is.Numeric(qpower, allowable.length = 1) || qpower <= 1)
- stop("bad input for argument 'qpower'")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
- stop("bad input for argument 'shrinkage.init'")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+
+ if (!is.Numeric(qpower, allowable.length = 1) || qpower <= 1)
+ stop("bad input for argument 'qpower'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
new("vglmff",
@@ -923,58 +1040,74 @@ lqnorm = function(qpower = 2, link = "identity", earg = list(),
"Links: ",
namesof("Y1", link, earg = earg, tag = TRUE)),
initialize = eval(substitute(expression({
- M = if (is.matrix(y)) ncol(y) else 1
- if (M != 1)
- stop("response must be a vector or a one-column matrix")
- dy = dimnames(y)
- predictors.names = if (!is.null(dy[[2]])) dy[[2]] else
- paste("mu", 1:M, sep = "")
- predictors.names = namesof(predictors.names, link = .link,
- earg = .earg, short = TRUE)
- if (!length(etastart)) {
- meany = weighted.mean(y, w)
- mean.init = rep(if(length( .imu)) .imu else
- {if( .imethod == 2) median(y) else
- if ( .imethod == 1) meany else
- .sinit * meany + (1 - .sinit) * y
- }, len = n)
- etastart = theta2eta(mean.init, link = .link, earg = .earg)
- }
- }), list( .imethod = imethod, .imu = imu,
- .sinit = shrinkage.init, .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta, link = .link, earg = .earg)
- mu
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- dy = dimnames(y)
- if (!is.null(dy[[2]]))
- dimnames(fit$fitted.values) = dy
- misc$link = rep( .link, length = M)
- names(misc$link) = predictors.names
- misc$earg = list(mu = .earg)
- misc$qpower = .qpower
- misc$imethod = .imethod
- misc$objectiveFunction = sum( w * (abs(y - mu))^(.qpower) )
- }), list( .qpower = qpower,
- .link = link, .earg = earg,
- .imethod = imethod ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link, earg = .earg)
- }, list( .link = link, .earg = earg ))),
- vfamily = "lqnorm",
- deriv = eval(substitute(expression({
- dmu.deta = dtheta.deta(theta=mu, link = .link, earg = .earg )
- myresid = y - mu
- signresid = sign(myresid)
- temp2 = (abs(myresid))^(.qpower-1)
- .qpower * w * temp2 * signresid * dmu.deta
- }), list( .qpower = qpower, .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- temp3 = (abs(myresid))^(.qpower-2)
- wz = .qpower * (.qpower - 1) * w * temp3 * dmu.deta^2
- wz
- }), list( .qpower = qpower, .link = link, .earg = earg ))))
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ M = if (is.matrix(y)) ncol(y) else 1
+ dy = dimnames(y)
+
+
+ predictors.names <- if (!is.null(dy[[2]])) dy[[2]] else
+ paste("mu", 1:M, sep = "")
+ predictors.names <- namesof(predictors.names, link = .link,
+ earg = .earg, short = TRUE)
+
+
+ if (!length(etastart)) {
+ meany = weighted.mean(y, w)
+ mean.init = rep(if(length( .i.mu)) .i.mu else
+ {if( .imethod == 2) median(y) else
+ if ( .imethod == 1) meany else
+ .sinit * meany + (1 - .sinit) * y
+ }, len = n)
+ etastart = theta2eta(mean.init, link = .link, earg = .earg)
+ }
+ }), list( .imethod = imethod, .i.mu = imu,
+ .sinit = shrinkage.init,
+ .link = link, .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu = eta2theta(eta, link = .link, earg = .earg)
+ mu
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ dy = dimnames(y)
+ if (!is.null(dy[[2]]))
+ dimnames(fit$fitted.values) = dy
+ misc$link = rep( .link, length = M)
+ names(misc$link) = predictors.names
+
+ misc$earg = list(mu = .earg)
+
+ misc$qpower = .qpower
+ misc$imethod = .imethod
+ misc$objectiveFunction = sum( c(w) * (abs(y - mu))^(.qpower) )
+ }), list( .qpower = qpower,
+ .link = link, .earg = earg,
+ .imethod = imethod ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link, earg = .earg)
+ }, list( .link = link, .earg = earg ))),
+ vfamily = "lqnorm",
+ deriv = eval(substitute(expression({
+ dmu.deta = dtheta.deta(theta=mu, link = .link, earg = .earg )
+ myresid = y - mu
+ signresid = sign(myresid)
+ temp2 = (abs(myresid))^(.qpower-1)
+ .qpower * c(w) * temp2 * signresid * dmu.deta
+ }), list( .qpower = qpower, .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ temp3 = (abs(myresid))^(.qpower-2)
+ wz = .qpower * (.qpower - 1) * c(w) * temp3 * dmu.deta^2
+ wz
+ }), list( .qpower = qpower, .link = link, .earg = earg ))))
}
@@ -983,14 +1116,14 @@ lqnorm = function(qpower = 2, link = "identity", earg = list(),
-dtobit = function(x, mean = 0, sd = 1,
- Lower = 0, Upper = Inf, log = FALSE) {
+dtobit <- function(x, mean = 0, sd = 1,
+ Lower = 0, Upper = Inf, log = FALSE) {
- log.arg <- log
- if (!is.logical(log.arg) || length(log.arg) != 1)
- stop("argument 'log' must be a single logical")
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
L = max(length(x), length(mean), length(sd), length(Lower),
length(Upper))
x = rep(x, len = L);
@@ -1028,7 +1161,7 @@ dtobit = function(x, mean = 0, sd = 1,
-ptobit = function(q, mean = 0, sd = 1,
+ptobit <- function(q, mean = 0, sd = 1,
Lower = 0, Upper = Inf,
lower.tail = TRUE, log.p = FALSE) {
@@ -1046,10 +1179,10 @@ ptobit = function(q, mean = 0, sd = 1,
Upper = rep(Upper, len = L);
ans = pnorm(q = q, mean = mean, sd = sd, lower.tail = lower.tail)
- ind1 <- q < Lower
+ ind1 <- (q < Lower)
ans[ind1] = if (lower.tail) ifelse(log.p, log(0.0), 0.0) else
ifelse(log.p, log(1.0), 1.0)
- ind2 <- Upper <= q
+ ind2 <- (Upper <= q)
ans[ind2] = if (lower.tail) ifelse(log.p, log(1.0), 1.0) else
ifelse(log.p, log(0.0), 0.0)
@@ -1059,7 +1192,7 @@ ptobit = function(q, mean = 0, sd = 1,
-qtobit = function(p, mean = 0, sd = 1,
+qtobit <- function(p, mean = 0, sd = 1,
Lower = 0, Upper = Inf) {
L = max(length(p), length(mean), length(sd), length(Lower),
@@ -1074,10 +1207,10 @@ qtobit = function(p, mean = 0, sd = 1,
pnorm.Lower = ptobit(q = Lower, mean = mean, sd = sd)
pnorm.Upper = ptobit(q = Upper, mean = mean, sd = sd)
- ind1 <- p <= pnorm.Lower
+ ind1 <- (p <= pnorm.Lower)
ans[ind1] = Lower[ind1]
- ind2 <- pnorm.Upper <= p
+ ind2 <- (pnorm.Upper <= p)
ans[ind2] = Upper[ind2]
ans
@@ -1088,7 +1221,7 @@ qtobit = function(p, mean = 0, sd = 1,
-rtobit = function(n, mean = 0, sd = 1,
+rtobit <- function(n, mean = 0, sd = 1,
Lower = 0, Upper = Inf) {
use.n = if ((length.n <- length(n)) > 1) length.n else
@@ -1126,7 +1259,6 @@ tobit.control <- function(save.weight = TRUE, ...)
tobit <- function(Lower = 0, Upper = Inf,
lmu = "identity", lsd = "loge",
- emu = list(), esd = list(),
nsimEIM = 250,
imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
@@ -1137,10 +1269,17 @@ tobit.control <- function(save.weight = TRUE, ...)
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
+
+
+ lmu <- as.list(substitute(lmu))
+ e.mu <- link2list(lmu)
+ l.mu <- attr(e.mu, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -1152,6 +1291,7 @@ tobit.control <- function(save.weight = TRUE, ...)
any(Lower >= Upper))
stop("Lower and Upper must ",
"be numeric with Lower < Upper")
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
@@ -1165,17 +1305,16 @@ tobit.control <- function(save.weight = TRUE, ...)
type.fitted <- match.arg(type.fitted,
c("uncensored", "censored", "mean.obs"))[1]
- if (!is.list(emu)) emu = list()
- if (!is.list(esd)) esd = list()
- stdTobit = all( Lower == 0.0) &&
+ stdTobit = all(Lower == 0.0) &&
all(!is.finite(Upper)) &&
all(lmu == "identity")
+
new("vglmff",
blurb = c("Tobit model\n\n",
"Links: ",
- namesof("mu", lmu, earg = emu, tag = TRUE), "; ",
+ namesof("mu", l.mu, earg = e.mu, tag = TRUE), "; ",
namesof("sd", lsd, earg = esd, tag = TRUE), "\n",
"Mean: mu", "\n",
"Conditional variance: sd^2"),
@@ -1186,26 +1325,40 @@ tobit.control <- function(save.weight = TRUE, ...)
eval(negzero.expression)
}), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero,
- nsimEIM = .nsimEIM)
+ zero = .zero ,
+ nsimEIM = .nsimEIM )
}, list( .zero = zero, .nsimEIM = nsimEIM ))),
+
initialize = eval(substitute(expression({
Musual = 2
- y = cbind(y)
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
ncoly = ncol(y)
M = Musual * ncoly
- Lowmat = matrix( .Lower, nrow = n, ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper, nrow = n, ncol = ncoly, byrow = TRUE)
+ Lowmat = matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
+ Uppmat = matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
extra$censoredL = (y <= Lowmat)
extra$censoredU = (y >= Uppmat)
if (any(y < Lowmat)) {
warning("replacing response values less than the value ",
- .Lower, " by ", .Lower)
+ .Lower , " by ", .Lower )
y[y < Lowmat] = Lowmat[y < Lowmat]
}
if (any(y > Uppmat)) {
@@ -1214,52 +1367,52 @@ tobit.control <- function(save.weight = TRUE, ...)
y[y > Uppmat] = Uppmat[y > Uppmat]
}
- temp1.names =
+ temp1.names <-
if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = "")
- temp2.names =
+ temp2.names <-
if (ncoly == 1) "sd" else paste("sd", 1:ncoly, sep = "")
- predictors.names =
- c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
+ predictors.names <-
+ c(namesof(temp1.names, .l.mu, earg = .e.mu, tag = FALSE),
namesof(temp2.names, .lsd, earg = .esd, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
if (!length(etastart)) {
- anyc = cbind(extra$censoredL | extra$censoredU)
- i11 = if ( .imethod == 1) anyc else FALSE # can be all data
+ anyc <- cbind(extra$censoredL | extra$censoredU)
+ i11 <- if ( .imethod == 1) anyc else FALSE # can be all data
- mu.init =
- sd.init = matrix(0.0, n, ncoly)
+ mu.init <-
+ sd.init <- matrix(0.0, n, ncoly)
for(ii in 1:ncol(y)) {
- use.i11 = i11[, ii]
- mylm = lm.wfit(x = cbind(x[!use.i11,]),
- y = y[!use.i11, ii], w = w[!use.i11])
- sd.init[, ii] = sqrt( sum(w[!use.i11] * mylm$resid^2)
+ use.i11 <- i11[, ii]
+ mylm <- lm.wfit(x = cbind(x[!use.i11, ]),
+ y = y[!use.i11, ii], w = w[!use.i11, ii])
+ sd.init[, ii] <- sqrt( sum(w[!use.i11, ii] * mylm$resid^2)
/ mylm$df.residual ) * 1.5
- mu.init[!use.i11, ii] = mylm$fitted.values
+ mu.init[!use.i11, ii] <- mylm$fitted.values
if (any(anyc[, ii]))
- mu.init[anyc[, ii], ii] = x[anyc[, ii],, drop = FALSE] %*%
+ mu.init[anyc[, ii], ii] <- x[anyc[, ii],, drop = FALSE] %*%
mylm$coeff
}
- if (length( .imu ))
- mu.init = matrix( .imu, n, ncoly, byrow = TRUE)
- if (length( .isd ))
- sd.init = matrix( .isd, n, ncoly, byrow = TRUE)
+ if (length( .i.mu ))
+ mu.init <- matrix( .i.mu , n, ncoly, byrow = TRUE)
+ if (length( .i.sd ))
+ sd.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
- etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
- theta2eta(sd.init, .lsd, earg = .esd))
+ etastart <- cbind(theta2eta(mu.init, .l.mu, earg = .e.mu ),
+ theta2eta(sd.init, .lsd, earg = .esd ))
- etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <- etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .Lower = Lower, .Upper = Upper,
- .lmu = lmu, .lsd = lsd,
- .emu = emu, .esd = esd,
- .imu = imu, .isd = isd,
+ .l.mu = l.mu, .lsd = lsd,
+ .e.mu = e.mu, .esd = esd,
+ .i.mu = imu, .i.sd = isd,
.imethod = imethod ))),
linkinv = eval(substitute( function(eta, extra = NULL) {
Musual = 2
ncoly = ncol(eta) / Musual
- mum = eta2theta(eta[,Musual*(1:ncoly)-1, drop=FALSE], .lmu, earg = .emu)
+ mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop=FALSE], .l.mu, earg = .e.mu )
if ( .type.fitted == "uncensored")
return(mum)
@@ -1271,7 +1424,8 @@ tobit.control <- function(save.weight = TRUE, ...)
mum
} else {
- sdm = eta2theta(eta[,Musual*(1:ncoly)-0, drop=FALSE],.lsd, earg = .esd)
+ sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ .lsd , earg = .esd )
zeddL = (Lowmat - mum) / sdm
zeddU = (Uppmat - mum) / sdm
Phi.L = pnorm(zeddL)
@@ -1283,13 +1437,13 @@ tobit.control <- function(save.weight = TRUE, ...)
Lowmat * Phi.L +
Uppmat * (1 - Phi.U)
}
- }, list( .lmu = lmu, .lsd = lsd,
- .emu = emu, .esd = esd,
+ }, list( .l.mu = l.mu, .lsd = lsd,
+ .e.mu = e.mu, .esd = esd,
.Lower = Lower, .Upper = Upper,
.type.fitted = type.fitted ))),
last = eval(substitute(expression({
- temp0303 = c(rep( .lmu, length = ncoly),
+ temp0303 = c(rep( .l.mu, length = ncoly),
rep( .lsd, length = ncoly))
names(temp0303) =
c(if (ncoly == 1) "mu" else paste("mu", 1:ncoly, sep = ""),
@@ -1300,17 +1454,19 @@ tobit.control <- function(save.weight = TRUE, ...)
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] = .emu
- misc$earg[[Musual*ii ]] = .esd
+ misc$earg[[Musual*ii-1]] = .e.mu
+ misc$earg[[Musual*ii ]] = .esd
}
+ misc$multipleResponses <- TRUE
misc$expected = TRUE
- misc$Lower = .Lower
- misc$Upper = .Upper
misc$imethod = .imethod
misc$nsimEIM = .nsimEIM
misc$Musual = Musual
misc$stdTobit = .stdTobit
+ misc$Lower = Lowmat
+ misc$Upper = Uppmat
+
if ( .stdTobit ) {
save.weight <- control$save.weight <- FALSE
@@ -1318,11 +1474,12 @@ tobit.control <- function(save.weight = TRUE, ...)
}
- }), list( .lmu = lmu, .lsd = lsd,
- .emu = emu, .esd = esd,
+ }), list( .l.mu = l.mu, .lsd = lsd,
+ .e.mu = e.mu, .esd = esd,
.nsimEIM = nsimEIM, .imethod = imethod,
.stdTobit = stdTobit,
- .Lower = Lower, .Upper = Upper ))),
+ .Lower = Lower,
+ .Upper = Upper ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual = 2
@@ -1332,12 +1489,14 @@ tobit.control <- function(save.weight = TRUE, ...)
cenL = extra$censoredL
cenU = extra$censoredU
cen0 = !cenL & !cenU # uncensored obsns
- Lowmat = matrix( .Lower, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper, nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+ Lowmat = matrix( .Lower , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
+ Uppmat = matrix( .Upper , nrow = nrow(eta), ncol = ncoly, byrow = TRUE)
- mum = eta2theta(eta[,Musual*(1:ncoly)-1, drop=FALSE],.lmu, earg = .emu)
- sdm = eta2theta(eta[,Musual*(1:ncoly)-0, drop=FALSE],.lsd, earg = .esd)
+ mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE],
+ .l.mu , earg = .e.mu )
+ sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE],
+ .lsd , earg = .esd )
ell0 = dnorm( y[cen0], mean = mum[cen0], sd = sdm[cen0],
log = TRUE)
@@ -1355,8 +1514,8 @@ tobit.control <- function(save.weight = TRUE, ...)
sum(wmat[cenL] * ellL) +
sum(wmat[cenU] * ellU)
}
- }, list( .lmu = lmu, .lsd = lsd,
- .emu = emu, .esd = esd,
+ }, list( .l.mu = l.mu, .lsd = lsd,
+ .e.mu = e.mu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
vfamily = c("tobit"),
deriv = eval(substitute(expression({
@@ -1364,22 +1523,22 @@ tobit.control <- function(save.weight = TRUE, ...)
y = cbind(y)
ncoly = ncol(y)
- Lowmat = matrix( .Lower, nrow = n, ncol = ncoly, byrow = TRUE)
- Uppmat = matrix( .Upper, nrow = n, ncol = ncoly, byrow = TRUE)
+ Lowmat = matrix( .Lower , nrow = n, ncol = ncoly, byrow = TRUE)
+ Uppmat = matrix( .Upper , nrow = n, ncol = ncoly, byrow = TRUE)
cenL = extra$censoredL
cenU = extra$censoredU
cen0 = !cenL & !cenU # uncensored obsns
- mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE], .lmu, earg = .emu)
- sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE], .lsd, earg = .esd)
+ mum = eta2theta(eta[, Musual*(1:ncoly)-1, drop = FALSE], .l.mu, earg = .e.mu )
+ sdm = eta2theta(eta[, Musual*(1:ncoly)-0, drop = FALSE], .lsd, earg = .esd )
zedd = (y - mum) / sdm
dl.dmu = zedd / sdm
dl.dsd = (zedd^2 - 1) / sdm
- dmu.deta = dtheta.deta(mum, .lmu, earg = .emu)
- dsd.deta = dtheta.deta(sdm, .lsd, earg = .esd)
+ dmu.deta = dtheta.deta(mum, .l.mu, earg = .e.mu )
+ dsd.deta = dtheta.deta(sdm, .lsd, earg = .esd )
if (any(cenL)) {
mumL = Lowmat - mum
@@ -1403,19 +1562,22 @@ tobit.control <- function(save.weight = TRUE, ...)
dthetas.detas = cbind(dmu.deta, dsd.deta)
dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
- myderiv = c(w) * cbind(dl.dmu, dl.dsd) * dthetas.detas
+ myderiv = cbind(c(w) * dl.dmu,
+ c(w) * dl.dsd) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .lmu = lmu, .lsd = lsd,
- .emu = emu, .esd = esd,
+ }), list( .l.mu = l.mu, .lsd = lsd,
+ .e.mu = e.mu, .esd = esd,
.Lower = Lower, .Upper = Upper ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
if (is.numeric( .nsimEIM ) &&
! .stdTobit ) {
+
+
run.varcov = 0
for(spp. in 1:ncoly) {
@@ -1452,15 +1614,15 @@ tobit.control <- function(save.weight = TRUE, ...)
dl.dsd[cenU] = fred21 * (-mumU[cenU] / sdvec[cenU]^2)
}
- rm(ysim)
- temp3 = cbind(dl.dmu, dl.dsd)
- run.varcov = run.varcov +
- temp3[, ind1$row.index] *
- temp3[, ind1$col.index]
+ rm(ysim)
+ temp3 = cbind(dl.dmu, dl.dsd)
+ run.varcov = run.varcov +
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]
}
run.varcov = run.varcov / .nsimEIM
- wz1 = if (intercept.only)
+ wz1 = if (intercept.only && FALSE)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else
run.varcov
@@ -1515,8 +1677,11 @@ tobit.control <- function(save.weight = TRUE, ...)
} # End of EIM
- c(w) * wz
- }), list( .lmu = lmu, .Lower = Lower, .Upper = Upper,
+ temp = w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ }), list( .l.mu = lmu, .Lower = Lower, .Upper = Upper,
.lsd = lsd,
.stdTobit = stdTobit,
.nsimEIM = nsimEIM ))))
@@ -1527,7 +1692,6 @@ tobit.control <- function(save.weight = TRUE, ...)
normal1 <- function(lmean = "identity", lsd = "loge", lvar = "loge",
- emean = list(), esd = list(), evar = list(),
var.arg = FALSE,
imethod = 1,
isd = NULL,
@@ -1538,34 +1702,51 @@ tobit.control <- function(save.weight = TRUE, ...)
- if (mode(lmean) != "character" && mode(lmean) != "name")
- lmean <- as.character(substitute(lmean))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd <- as.character(substitute(lsd))
+
+
+ lmean <- as.list(substitute(lmean))
+ emean <- link2list(lmean)
+ lmean <- attr(emean, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+ lvar <- as.list(substitute(lvar))
+ e.var <- link2list(lvar)
+ l.var <- attr(e.var, "function.name")
+
+
+
+
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(emean)) emean <- list()
- if (!is.list(esd)) esd <- list()
- if (!is.list(evar)) evar <- list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+
if (!is.logical(var.arg) || length(var.arg) != 1)
stop("argument 'var.arg' must be a single logical")
- if (!is.logical(intercept.apply) || length(intercept.apply) != 1)
+ if (!is.logical(intercept.apply) ||
+ length(intercept.apply) != 1)
stop("argument 'intercept.apply' must be a single logical")
+ if (is.logical(parallel) && parallel && length(zero))
+ stop("set 'zero = NULL' if 'parallel = TRUE'")
+
+
new("vglmff",
blurb = c("Univariate normal distribution\n\n",
"Links: ",
namesof("mean", lmean, earg = emean, tag = TRUE), "; ",
if (var.arg)
- namesof("var", lvar, earg = evar, tag = TRUE) else
+ namesof("var", l.var, earg = e.var, tag = TRUE) else
namesof("sd" , lsd, earg = esd, tag = TRUE),
"\n",
if (var.arg) "Variance: var" else "Variance: sd^2"),
@@ -1574,7 +1755,7 @@ tobit.control <- function(save.weight = TRUE, ...)
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
intercept.apply = .intercept.apply )
dotzero <- .zero
@@ -1591,14 +1772,10 @@ tobit.control <- function(save.weight = TRUE, ...)
initialize = eval(substitute(expression({
orig.y <- y
- y <- cbind(y)
- ncoly <- ncol(y)
- Musual <- 2
- extra$ncoly <- ncoly
- extra$Musual <- Musual
- M <- Musual * ncoly
+
+
@@ -1615,24 +1792,42 @@ tobit.control <- function(save.weight = TRUE, ...)
extra$attributes.y = attributes(orig.y)
} else {
- w <- cbind(w)
- if (ncol(w) < ncoly)
- w <- matrix(w, n, ncoly)
- if (ncol(w) > ncoly)
- stop("currently the 'weights' argument must have no more ",
- "than the number of columns of the response")
}
+
+
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+
mynames1 <- paste("mean",
if (ncoly > 1) 1:ncoly else "", sep = "")
mynames2 <- paste(if ( .var.arg ) "var" else "sd",
if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .lmean, earg = .emean, tag = FALSE),
+ c(namesof(mynames1, .lmean , earg = .emean , tag = FALSE),
if ( .var.arg )
- namesof(mynames2, .lvar , earg = .evar , tag = FALSE) else
- namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
+ namesof(mynames2, .l.var , earg = .e.var , tag = FALSE) else
+ namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
extra$predictors.names <- predictors.names
@@ -1645,42 +1840,50 @@ tobit.control <- function(save.weight = TRUE, ...)
pmax(1/1024, y[, jay]) else
if( .imethod == 1) median(y[, jay]) else
if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
+ if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
+ 0.5 + y[, jay] * 0.5 else
mean(jfit$fitted)
sdev.init[, jay] <-
if( .imethod == 1) {
- sqrt( sum(w * (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
} else if( .imethod == 2) {
if (jfit$df.resid > 0)
sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
+ } else if( .imethod == 3) {
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
} else {
sqrt( sum(w[, jay] * abs(y[, jay] -
mean.init[, jay])) / sum(w[, jay]) )
}
- if (any(sdev.init[, jay] <= sqrt(.Machine$double.eps) ))
+ if (any(sdev.init[, jay] <= sqrt( .Machine$double.eps ) ))
sdev.init[, jay] <- 1.01
}
- if (length( .isd )) {
- sdev.init <- matrix( .isd , n, ncoly, byrow = TRUE)
+ if (length( .i.sd )) {
+ sdev.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
}
- etastart <- cbind(theta2eta(mean.init, .lmean , earg = .emean ),
- if ( .var.arg )
- theta2eta(sdev.init^2, .lvar , earg = .evar ) else
- theta2eta(sdev.init , .lsd , earg = .esd ))
- etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
+ etastart <-
+ cbind(theta2eta(mean.init, .lmean , earg = .emean ),
+ if ( .var.arg )
+ theta2eta(sdev.init^2, .l.var , earg = .e.var ) else
+ theta2eta(sdev.init , .lsd , earg = .esd ))
+ etastart <-
+ etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
colnames(etastart) <- predictors.names
}
- }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
- .emean = emean, .esd = esd, .evar = evar,
- .isd = isd,
+ }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
+ .emean = emean, .esd = esd, .e.var = e.var,
+ .i.sd = isd,
.var.arg = var.arg, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
@@ -1688,12 +1891,12 @@ tobit.control <- function(save.weight = TRUE, ...)
ncoly <- extra$ncoly
eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
}, list( .lmean = lmean,
- .emean = emean, .esd = esd , .evar = evar ))),
+ .emean = emean, .esd = esd , .e.var = e.var ))),
last = eval(substitute(expression({
Musual <- extra$Musual
misc$link <- c(rep( .lmean , length = ncoly),
- rep( .lsd , length = ncoly))
+ rep( .lsd , length = ncoly))
misc$link <- misc$link[interleave.VGAM(Musual * ncoly, M = Musual)]
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(Musual * ncoly, M = Musual)]
@@ -1703,8 +1906,8 @@ tobit.control <- function(save.weight = TRUE, ...)
misc$earg <- vector("list", Musual * ncoly)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .emean
- misc$earg[[Musual*ii ]] <- if ( .var.arg) .evar else .esd
+ misc$earg[[Musual*ii-1]] <- .emean
+ misc$earg[[Musual*ii ]] <- if ( .var.arg) .e.var else .esd
}
names(misc$earg) <- temp.names
@@ -1712,8 +1915,12 @@ tobit.control <- function(save.weight = TRUE, ...)
misc$Musual <- Musual
misc$expected <- TRUE
misc$imethod <- .imethod
- }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
- .emean = emean, .esd = esd, .evar = evar,
+ misc$multipleResponses <- TRUE
+ misc$parallel <- .parallel
+ misc$intercept.apply <- .intercept.apply
+ }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
+ .emean = emean, .esd = esd, .e.var = e.var,
+ .parallel = parallel, .intercept.apply = intercept.apply,
.var.arg = var.arg, .imethod = imethod ))),
loglikelihood = eval(substitute(
@@ -1721,17 +1928,17 @@ tobit.control <- function(save.weight = TRUE, ...)
ncoly <- extra$ncoly
Musual <- extra$Musual
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly)], .l.var , earg = .e.var )
sdev <- sqrt(Varm)
} else {
- sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
}
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
}
- }, list( .lsd = lsd, .lvar = lvar,
- .esd = esd, .evar = evar,
+ }, list( .lsd = lsd, .l.var = l.var,
+ .esd = esd, .e.var = e.var,
.var.arg = var.arg ))),
vfamily = c("normal1"),
deriv = eval(substitute(expression({
@@ -1740,7 +1947,7 @@ tobit.control <- function(save.weight = TRUE, ...)
mymu <- eta2theta(eta[, Musual*(1:ncoly) - 1], .lmean , earg = .emean )
if ( .var.arg ) {
- Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .lvar , earg = .evar )
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .l.var , earg = .e.var )
sdev <- sqrt(Varm)
} else {
sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
@@ -1755,36 +1962,38 @@ tobit.control <- function(save.weight = TRUE, ...)
dmu.deta <- dtheta.deta(mymu, .lmean , earg = .emean )
if ( .var.arg ) {
- dva.deta <- dtheta.deta(Varm, .lvar , earg = .evar )
+ dva.deta <- dtheta.deta(Varm, .l.var , earg = .e.var )
} else {
dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
}
- ans <- c(w) * cbind(dl.dmu * dmu.deta,
- if ( .var.arg ) dl.dva * dva.deta else dl.dsd * dsd.deta)
+ ans <- c(w) *
+ cbind(dl.dmu * dmu.deta,
+ if ( .var.arg ) dl.dva * dva.deta else
+ dl.dsd * dsd.deta)
ans <- ans[, interleave.VGAM(ncol(ans), M = Musual)]
ans
- }), list( .lmean = lmean, .lsd = lsd, .lvar = lvar,
- .emean = emean, .esd = esd, .evar = evar,
+ }), list( .lmean = lmean, .lsd = lsd, .l.var = l.var,
+ .emean = emean, .esd = esd, .e.var = e.var,
.var.arg = var.arg ))),
weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-column too
+ wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is 1-column too
- ed2l.dmu2 <- -1 / sdev^2
+ ned2l.dmu2 <- 1 / sdev^2
if ( .var.arg ) {
- ed2l.dva2 <- -0.5 / Varm^2
+ ned2l.dva2 <- 0.5 / Varm^2
} else {
- ed2l.dsd2 <- -2 / sdev^2
+ ned2l.dsd2 <- 2 / sdev^2
}
- wz[, Musual*(1:ncoly) - 1] <- -ed2l.dmu2 * dmu.deta^2
- if ( .var.arg ) {
- wz[, Musual*(1:ncoly) ] <- -ed2l.dva2 * dva.deta^2
+ wz[, Musual*(1:ncoly) - 1] <- ned2l.dmu2 * dmu.deta^2
+ wz[, Musual*(1:ncoly) ] <- if ( .var.arg ) {
+ ned2l.dva2 * dva.deta^2
} else {
- wz[, Musual*(1:ncoly) ] <- -ed2l.dsd2 * dsd.deta^2
+ ned2l.dsd2 * dsd.deta^2
}
- c(w) * wz
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .var.arg = var.arg ))))
}
@@ -1792,99 +2001,545 @@ tobit.control <- function(save.weight = TRUE, ...)
+
+
+
+
+ normal1.term <-
+ function(linklist = NULL, # list(),
+ earglist = NULL, # list(),
+ lsd = "loge", lvar = "loge",
+ esd = list(), evar = list(),
+ var.arg = FALSE,
+ imethod = 1,
+ isd = NULL,
+ ieta.coeffs = NULL,
+ zero = "M")
+{
+
+
+
+
+ print("20120730; in normal1.term()")
+
+
+
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
+ lvar <- as.list(substitute(lvar))
+ e.var <- link2list(lvar)
+ l.var <- attr(e.var, "function.name")
+
+
+
+
+ if (is.character(zero) && zero != "M")
+ stop("bad input for argument 'zero'")
+
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+
+
+ if (!is.logical(var.arg) || length(var.arg) != 1)
+ stop("argument 'var.arg' must be a single logical")
+
+
+
+ new("vglmff",
+ blurb = c("Univariate normal distribution with ",
+ "varying coefficients links/constraints\n\n",
+ "Links: ",
+ if (var.arg)
+ namesof("var", l.var, earg = e.var, tag = TRUE) else
+ namesof("sd" , lsd, earg = esd, tag = TRUE), "; ",
+ "\n",
+ if (var.arg) "Variance: var" else "Variance: sd^2"),
+
+ constraints = eval(substitute(expression({
+
+
+ dotzero <- .zero
+ if (is.character(dotzero) && dotzero == "M")
+ dotzero <- M
+
+ Musual <- M
+ eval(negzero.expression)
+ }), list( .zero = zero
+ ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = NA,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+ asgn <- attr(x, "assign")
+ nasgn <- names(asgn)
+ asgn2 <- attr(Xm2, "assign")
+ nasgn2 <- names(asgn2)
+
+
+ print("head(x)")
+ print( head(x) )
+ print("head(Xm2)")
+ print( head(Xm2) )
+
+
+ print("attributes(x)")
+ print( attributes(x) )
+ print("attributes(Xm2)")
+ print( attributes(Xm2) )
+
+
+
+
+
+ print("names(constraints)")
+ print( names(constraints) )
+ print('nasgn')
+ print( nasgn )
+ print('nasgn2')
+ print( nasgn2 )
+
+
+ linklist <- .linklist
+ Linklist <- vector("list", length(nasgn2))
+ names(Linklist) <- nasgn2
+ for (ilocal in 1:length(nasgn2))
+ Linklist[[ilocal]] <- "identity"
+ if (length( linklist ) > 0) {
+ for (ilocal in 1:length(nasgn2))
+ if (any(names(linklist) == nasgn2[ilocal]))
+ Linklist[[ilocal]] <- linklist[[(nasgn2[ilocal])]]
+ }
+ print('linklist')
+ print( linklist )
+ print('Linklist')
+ print( Linklist )
+ print('unlist(Linklist)')
+ print( unlist(Linklist) )
+
+
+
+
+
+
+
+ orig.y <- y
+
+
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ number.mlogit <- sum(unlist(Linklist) == "mlogit")
+ print("number.mlogit")
+ print( number.mlogit )
+ if (number.mlogit == 1)
+ stop('cannot have only one "mlogit"')
+
+
+ ncoly <- ncol(y)
+ Musual <- NA
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- ncol(Xm2) - (number.mlogit > 0) + 1
+ print("M ,,,,,,,,,")
+ print( M )
+ extra$Xm2 <- Xm2
+
+
+
+ cn.Xm2 <- colnames(Xm2)
+ mynames1 <- NULL
+ for (ilocal in 1:length(cn.Xm2))
+ mynames1 <- c(mynames1,
+ namesof(cn.Xm2[ilocal], Linklist[[ilocal]],
+ list(), tag = FALSE))
+
+ print("mynames1")
+ print( mynames1 )
+
+ mynames2 <- paste(if ( .var.arg ) "var" else "sd",
+ if (ncoly > 1) 1:ncoly else "", sep = "")
+
+ predictors.names <-
+ c(mynames1,
+ if ( .var.arg )
+ namesof(mynames2, .l.var , earg = .e.var , tag = FALSE) else
+ namesof(mynames2, .lsd , earg = .esd , tag = FALSE))
+ print("predictors.names ,,,,,,,,,")
+ print( predictors.names )
+ extra$predictors.names <- predictors.names
+
+
+ if (!length(etastart)) {
+ sdev.init <- mean.init <- matrix(0, n, ncoly)
+ for (jay in 1:ncoly) {
+ jfit <- lm.wfit(x = Xm2, y = y[, jay], w = w[, jay])
+ mean.init[, jay] <- if ( mynames2 == "loge")
+ pmax(1/1024, y[, jay]) else
+ if( .imethod == 1) median(y[, jay]) else
+ if( .imethod == 2) weighted.mean(y[, jay], w = w[, jay]) else
+ if( .imethod == 3) weighted.mean(y[, jay], w = w[, jay]) *
+ 0.5 + y[, jay] * 0.5 else
+ mean(jfit$fitted)
+
+ sdev.init[, jay] <-
+ if( .imethod == 1) {
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ } else if( .imethod == 2) {
+ if (jfit$df.resid > 0)
+ sqrt( sum(w[, jay] * jfit$resid^2) / jfit$df.resid ) else
+ sqrt( sum(w[, jay] * jfit$resid^2) / sum(w[, jay]) )
+ } else if( .imethod == 3) {
+ sqrt( sum(w[, jay] *
+ (y[, jay] - mean.init[, jay])^2) / sum(w[, jay]) )
+ } else {
+ sqrt( sum(w[, jay] * abs(y[, jay] -
+ mean.init[, jay])) / sum(w[, jay]) )
+ }
+
+ if (any(sdev.init[, jay] <= sqrt( .Machine$double.eps ) ))
+ sdev.init[, jay] <- 1.01
+
+ print("head(sdev.init[, jay])9")
+ print( head(sdev.init[, jay]) )
+ }
+
+
+ if (length( .i.sd )) {
+ sdev.init <- matrix( .i.sd , n, ncoly, byrow = TRUE)
+ }
+
+
+ etastart <-
+ cbind(eta.equi.probs,
+ if ( .var.arg )
+ theta2eta(sdev.init^2, .l.var , earg = .e.var ) else
+ theta2eta(sdev.init , .lsd , earg = .esd ))
+
+ colnames(etastart) <- predictors.names
+ print("head(etastart)9")
+ print( head(etastart) )
+
+ new.coeffs <- weighted.mean(y, w)
+ extra$new.coeffs <- new.coeffs
+
+ }
+ }), list( .linklist = linklist,
+ .earglist = earglist,
+ .lsd = lsd, .l.var = lvar,
+ .esd = esd, .e.var = evar,
+ .i.sd = isd,
+ .ieta.coeffs = ieta.coeffs,
+ .var.arg = var.arg, .imethod = imethod ))),
+
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ print("hi9")
+
+ M <- ncol(eta)
+ betas.matrix <- 1 / (1 + exp(-eta[, -M, drop = FALSE]))
+ betas.matrix <- cbind(betas.matrix,
+ 1 / (1 + rowSums(exp(eta[, -M, drop = FALSE]))))
+ print("head(betas.matrix)1")
+ print( head(betas.matrix) )
+
+ betas.matrix <- cbind(extra$new.coeffs[1], betas.matrix)
+
+ print("head(betas.matrix)2")
+ print( head(betas.matrix) )
+ print("head(extra$Xm2)")
+ print( head(extra$Xm2) )
+
+
+
+ rowSums(extra$Xm2 * betas.matrix)
+ }, list( .linklist = linklist,
+ .earglist = earglist,
+ .esd = esd , .e.var = evar ))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( "mlogit", length = M - 1),
+ rep( .lsd , length = ncoly))
+ temp.names <- c(mynames1, mynames2)
+ names(misc$link) <- temp.names
+
+
+
+
+ misc$var.arg <- .var.arg
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$imethod <- .imethod
+ misc$multipleResponses <- FALSE
+ }), list( .linklist = linklist,
+ .earglist = earglist,
+ .lsd = lsd, .l.var = lvar,
+ .esd = esd, .e.var = evar,
+ .var.arg = var.arg, .imethod = imethod ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ncoly <- extra$ncoly
+ Musual <- 1 # extra$Musual
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly)], .l.var , earg = .e.var )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly)], .lsd , earg = .esd )
+ }
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dnorm(y, m = mu, sd = sdev, log = TRUE))
+ }
+ }, list( .lsd = lsd, .l.var = lvar,
+ .esd = esd, .e.var = evar,
+ .var.arg = var.arg ))),
+ vfamily = c("normal1.term"),
+ deriv = eval(substitute(expression({
+ print("------ in @ deriv -------------")
+ extra$new.coeffs <- new.coeffs
+
+ ncoly <- extra$ncoly
+ Musual <- 1 # extra$Musual
+
+ if ( .var.arg ) {
+ Varm <- eta2theta(eta[, Musual*(1:ncoly) ], .l.var , earg = .e.var )
+ sdev <- sqrt(Varm)
+ } else {
+ sdev <- eta2theta(eta[, Musual*(1:ncoly) ], .lsd , earg = .esd )
+ }
+
+
+
+ betas.matrix <- 1 / (1 + exp(-eta[, -M, drop = FALSE]))
+ betas.matrix <- cbind(betas.matrix,
+ 1 / (1 + rowSums(exp(eta[, -M, drop = FALSE]))))
+ print("head(betas.matrix)5")
+ print( head(betas.matrix) )
+
+ if ( !extra$sum1.intercept &&
+ any(colnames(extra$X_LM) == "(Intercept)"))
+ betas.matrix <- cbind(extra$new.coeffs[1], betas.matrix)
+
+ print("head(betas.matrix)6")
+ print( head(betas.matrix) )
+ print("head(extra$Xm2)")
+ print( head(extra$Xm2) )
+
+ use.x <- if ( sum1.intercept )
+ Xm2[, -ncol(Xm2), drop = FALSE] else
+ Xm2[, -c(1, ncol(Xm2)), drop = FALSE]
+ mymu <- rowSums(Xm2 * betas.matrix)
+ dMu.deta <- mymu * (1 - mymu) * use.x
+
+
+ print("head(mymu)9")
+ print( head(mymu) )
+ print("head(dMu.deta)9")
+ print( head(dMu.deta) )
+ if ( .var.arg ) {
+ dl.dva <- -0.5 / Varm + 0.5 * (y - mymu)^2 / sdev^4
+ } else {
+ dl.dsd <- -1.0 / sdev + (y - mymu)^2 / sdev^3
+ }
+ dl.dmu <- (y - mymu) / sdev^2
+
+
+ if ( .var.arg ) {
+ dva.deta <- dtheta.deta(Varm, .l.var , earg = .e.var )
+ } else {
+ dsd.deta <- dtheta.deta(sdev, .lsd , earg = .esd )
+ }
+
+ ans <- c(w) *
+ cbind(dl.dmu * dMu.deta,
+ if ( .var.arg ) dl.dva * dva.deta else
+ dl.dsd * dsd.deta)
+ print("head(deriv.ans)9")
+ print( head(ans) )
+ ans
+ }), list( .linklist = linklist, .lsd = lsd, .l.var = lvar,
+ .earglist = earglist, .esd = esd, .e.var = evar,
+ .var.arg = var.arg ))),
+ weight = eval(substitute(expression({
+ print("------ in @ weight -------------")
+ wz <- matrix(0, n, dimm(M)) # diag matrix; y is 1-column too
+ print("head(wz)")
+ print( head(wz) )
+
+ if ( .var.arg ) {
+ ned2l.dva2 <- 0.5 / Varm^2
+ } else {
+ ned2l.dsd2 <- 2 / sdev^2
+ }
+
+
+
+
+
+ wz[, iam(M, M, M = M)] <- if ( .var.arg ) {
+ ned2l.dva2 * dva.deta^2
+ } else {
+ ned2l.dsd2 * dsd.deta^2
+ }
+
+
+ index = iam(NA, NA, M , both = TRUE, diag = TRUE)
+ indtw = iam(NA, NA, M-1, both = TRUE, diag = TRUE)
+ print("index")
+ print( index )
+ print("indtw")
+ print( indtw )
+
+
+ twz = dMu.deta[, indtw$row.index, drop = FALSE] *
+ dMu.deta[, indtw$col.index, drop = FALSE]
+ print("head(twz)9------------------------------------------------")
+ print( head(twz) )
+
+
+ for (ilocal in 1:ncol(twz))
+ wz[, iam(index$row.index[ilocal],
+ index$col.index[ilocal], M = M)] <-
+ twz[, iam(indtw$row.index[ilocal],
+ indtw$col.index[ilocal], M = M-1)]
+
+
+ print("head(wz)9------------------------------------------------")
+ print( head(wz) )
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ }), list( .var.arg = var.arg ))))
+} # End of normal1.term()
+
+
+
+
+
lognormal <- function(lmeanlog = "identity", lsdlog = "loge",
- emeanlog = list(), esdlog = list(),
zero = 2)
{
- if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
- lmeanlog = as.character(substitute(lmeanlog))
- if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
- lsdlog = as.character(substitute(lsdlog))
- if (length(zero) &&
- (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- zero > 2))
- stop("bad input for argument argument 'zero'")
- if (!is.list(emeanlog)) emeanlog = list()
- if (!is.list(esdlog)) esdlog = list()
- new("vglmff",
- blurb = c("Two-parameter (univariate) lognormal distribution\n\n",
- "Links: ",
- namesof("meanlog", lmeanlog, earg = emeanlog, tag = TRUE), ", ",
- namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (min(y) <= 0) stop("response must be positive")
-
- predictors.names =
- c(namesof("meanlog", .lmeanlog, earg = .emeanlog, tag = FALSE),
- namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE))
-
- if (!length(etastart)) {
- mylm = lm.wfit(x = x, y=log(y), w = w)
- sdlog.y.est = sqrt( sum(w * mylm$resid^2) / mylm$df.residual )
- etastart = cbind(
- meanlog = rep(theta2eta(log(median(y)), .lmeanlog,
- earg = .emeanlog), length = n),
- sdlog = rep(theta2eta(sdlog.y.est, .lsdlog,
- earg = .esdlog), length = n))
- }
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- exp(mulog + 0.5 * sdlog^2)
- }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- last = eval(substitute(expression({
- misc$link = c("meanlog" = .lmeanlog, "sdlog" = .lsdlog)
- misc$earg = list("meanlog" = .emeanlog, "sdlog" = .esdlog)
- misc$expected = TRUE
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE))
- }
- }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- vfamily = c("lognormal"),
- deriv = eval(substitute(expression({
- mulog = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- dmulog.deta = dtheta.deta(mulog, .lmeanlog, earg = .emeanlog)
- dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
-
- dl.dmulog = (log(y) - mulog) / sdlog^2
- dl.dsdlog = -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
- dl.dlambda = (1 + (log(y) - mulog) / sdlog^2) / y
-
- c(w) * cbind(dl.dmulog * dmulog.deta,
- dl.dsdlog * dsdlog.deta)
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- weight = expression({
- wz = matrix(as.numeric(NA), n, 2) # Diagonal!
- ed2l.dmulog2 = 1 / sdlog^2
- ed2l.dsdlog2 = 2 * ed2l.dmulog2
- wz[,iam(1,1,M)] = ed2l.dmulog2 * dmulog.deta^2
- wz[,iam(2,2,M)] = ed2l.dsdlog2 * dsdlog.deta^2
-
- wz = c(w) * wz
- wz
- }))
+ lmulog <- as.list(substitute(lmeanlog))
+ emulog <- link2list(lmulog)
+ lmulog <- attr(emulog, "function.name")
+
+ lsdlog <- as.list(substitute(lsdlog))
+ esdlog <- link2list(lsdlog)
+ lsdlog <- attr(esdlog, "function.name")
+
+
+
+
+ if (length(zero) &&
+ (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
+ zero > 2))
+ stop("bad input for argument argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Two-parameter (univariate) lognormal distribution\n\n",
+ "Links: ",
+ namesof("meanlog", lmulog, earg = emulog, tag = TRUE), ", ",
+ namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE)
+
+
+
+ predictors.names <-
+ c(namesof("meanlog", .lmulog, earg = .emulog, tag = FALSE),
+ namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE))
+
+ if (!length(etastart)) {
+ mylm = lm.wfit(x = x, y = log(y), w = w)
+ sdlog.y.est = sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
+ etastart = cbind(
+ meanlog = rep(theta2eta(log(median(y)), .lmulog,
+ earg = .emulog), length = n),
+ sdlog = rep(theta2eta(sdlog.y.est, .lsdlog,
+ earg = .esdlog), length = n))
+ }
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mulog = eta2theta(eta[, 1], .lmulog , earg = .emulog )
+ sdlog = eta2theta(eta[, 2], .lsdlog , earg = .esdlog )
+ exp(mulog + 0.5 * sdlog^2)
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ last = eval(substitute(expression({
+ misc$link = c("meanlog" = .lmulog , "sdlog" = .lsdlog )
+ misc$earg = list("meanlog" = .emulog , "sdlog" = .esdlog )
+
+ misc$expected = TRUE
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mulog = eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dlnorm(y, meanlog = mulog, sdlog = sdlog, log = TRUE))
+ }
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ vfamily = c("lognormal"),
+ deriv = eval(substitute(expression({
+ mulog = eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+
+ dmulog.deta = dtheta.deta(mulog, .lmulog, earg = .emulog)
+ dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+
+ dl.dmulog = (log(y) - mulog) / sdlog^2
+ dl.dsdlog = -1 / sdlog + (log(y) - mulog)^2 / sdlog^3
+
+ c(w) * cbind(dl.dmulog * dmulog.deta,
+ dl.dsdlog * dsdlog.deta)
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ weight = expression({
+ wz = matrix(as.numeric(NA), n, 2) # Diagonal!
+ ned2l.dmulog2 = 1 / sdlog^2
+ ned2l.dsdlog2 = 2 * ned2l.dmulog2
+
+ wz[, iam(1, 1, M)] = ned2l.dmulog2 * dmulog.deta^2
+ wz[, iam(2, 2, M)] = ned2l.dsdlog2 * dsdlog.deta^2
+
+ wz = c(w) * wz
+ wz
+ }))
}
@@ -1893,148 +2548,171 @@ tobit.control <- function(save.weight = TRUE, ...)
lognormal3 <- function(lmeanlog = "identity", lsdlog = "loge",
- emeanlog = list(), esdlog = list(),
powers.try = (-3):3,
delta = NULL, zero = 2)
{
- if (length(delta) &&
- !is.Numeric(delta, positive = TRUE))
- stop("bad input for argument argument 'delta'")
- if (mode(lmeanlog) != "character" && mode(lmeanlog) != "name")
- lmeanlog = as.character(substitute(lmeanlog))
- if (mode(lsdlog) != "character" && mode(lsdlog) != "name")
- lsdlog = as.character(substitute(lsdlog))
- if (length(zero) &&
- (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- zero > 3))
- stop("bad input for argument argument 'zero'")
+ if (length(delta) &&
+ !is.Numeric(delta, positive = TRUE))
+ stop("bad input for argument argument 'delta'")
- if (!is.list(emeanlog)) emeanlog = list()
- if (!is.list(esdlog)) esdlog = list()
- new("vglmff",
- blurb = c("Three-parameter (univariate) lognormal distribution\n\n",
- "Links: ",
- namesof("meanlog", lmeanlog, earg = emeanlog, tag = TRUE),
- "; ", namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE),
- "; ", namesof("lambda", "identity", earg = list(), tag = TRUE)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("meanlog", .lmeanlog, earg = .emeanlog, tag = FALSE),
- namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE),
- "lambda")
-
- if (!length(etastart)) {
- miny = min(y)
- if (length( .delta)) {
- lambda.init = rep(miny- .delta, length = n)
- } else {
- pvalue.vec = NULL
- powers.try = .powers.try
- for(delta in 10^powers.try) {
- pvalue.vec = c(pvalue.vec,
- shapiro.test(sample(log(y-miny+delta),
- size=min(5000, length(y ))))$p.value)
- }
- index.lambda = (1:length(powers.try))[pvalue.vec ==
- max(pvalue.vec)]
- lambda.init = miny - 10^powers.try[index.lambda]
- }
- mylm = lm.wfit(x = x, y=log(y-lambda.init), w = w)
- sdlog.y.est = sqrt( sum(w * mylm$resid^2) / mylm$df.residual )
- etastart = cbind(mu = log(median(y - lambda.init)),
- sdlog = rep(theta2eta(sdlog.y.est, .lsdlog, earg = .esdlog),
- length = n),
- lambda = lambda.init)
- }
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog,
- .delta = delta, .powers.try = powers.try ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg = list())
- lambda + exp(mymu + 0.5 * sdlog^2)
- }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- last = eval(substitute(expression({
- misc$link = c("meanlog" = .lmeanlog,
- "sdlog" = .lsdlog,
- "lambda" = "identity")
- misc$earg = list("meanlog" = .emeanlog,
- "sdlog" = .esdlog,
- "lambda" = list())
- misc$expected = TRUE
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg = list())
- if (any(y < lambda))
- warning("bad 'y'")
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w*dlnorm(y-lambda, meanlog=mymu, sdlog = sdlog, log = TRUE))
+
+ lmulog <- as.list(substitute(lmeanlog))
+ emulog <- link2list(lmulog)
+ lmulog <- attr(emulog, "function.name")
+
+ lsdlog <- as.list(substitute(lsdlog))
+ esdlog <- link2list(lsdlog)
+ lsdlog <- attr(esdlog, "function.name")
+
+
+
+
+ if (length(zero) &&
+ (!is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
+ zero > 3))
+ stop("bad input for argument argument 'zero'")
+
+
+
+
+
+ new("vglmff",
+ blurb = c("Three-parameter (univariate) lognormal distribution\n\n",
+ "Links: ",
+ namesof("meanlog", lmulog, earg = emulog, tag = TRUE), "; ",
+ namesof("sdlog", lsdlog, earg = esdlog, tag = TRUE), "; ",
+ namesof("lambda", "identity", earg = list(), tag = TRUE)),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y)
+
+
+
+ predictors.names <-
+ c(namesof("meanlog", .lmulog, earg = .emulog, tag = FALSE),
+ namesof("sdlog", .lsdlog, earg = .esdlog, tag = FALSE),
+ "lambda")
+
+ if (!length(etastart)) {
+ miny = min(y)
+ if (length( .delta)) {
+ lambda.init = rep(miny- .delta, length = n)
+ } else {
+ pvalue.vec = NULL
+ powers.try = .powers.try
+ for(delta in 10^powers.try) {
+ pvalue.vec = c(pvalue.vec,
+ shapiro.test(sample(log(y-miny+delta),
+ size=min(5000, length(y ))))$p.value)
}
- }, list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- vfamily = c("lognormal3"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta[,1], .lmeanlog, earg = .emeanlog)
- sdlog = eta2theta(eta[,2], .lsdlog, earg = .esdlog)
- lambda = eta2theta(eta[,3], "identity", earg = list())
- if (any(y < lambda))
- warning("bad 'y'")
- dl.dmymu = (log(y-lambda)-mymu) / sdlog^2
- dl.dsdlog = -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
- dl.dlambda = (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
- dmymu.deta = dtheta.deta(mymu, .lmeanlog, earg = .emeanlog)
- dsdlog.deta = dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
- dlambda.deta = dtheta.deta(lambda, "identity", earg = list())
- c(w) * cbind(dl.dmymu * dmymu.deta,
- dl.dsdlog * dsdlog.deta,
- dl.dlambda * dlambda.deta)
- }), list( .lmeanlog = lmeanlog, .lsdlog = lsdlog,
- .emeanlog = emeanlog, .esdlog = esdlog ))),
- weight = expression({
- wz = matrix(0, n, dimm(M))
- ed2l.dmymu2 = 1 / sdlog^2
- ed2l.dsdlog = 2 / sdlog^2
- temp9 = exp(-mymu+sdlog^2 / 2)
- ed2l.dlambda2 = exp(2*(-mymu+sdlog^2)) * (1+sdlog^2) / sdlog^2
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
- wz[,iam(2,2,M)] = ed2l.dsdlog * dsdlog.deta^2
- wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
- wz[,iam(1,3,M)] = temp9 * dmymu.deta * dlambda.deta / sdlog^2
- wz[,iam(2,3,M)] = -2 * temp9 / sdlog * dsdlog.deta * dlambda.deta
- wz = c(w) * wz
- wz
- }))
+ index.lambda = (1:length(powers.try))[pvalue.vec ==
+ max(pvalue.vec)]
+ lambda.init = miny - 10^powers.try[index.lambda]
+ }
+ mylm = lm.wfit(x = x, y=log(y-lambda.init), w = w)
+ sdlog.y.est = sqrt( sum(c(w) * mylm$resid^2) / mylm$df.residual )
+ etastart = cbind(mu = log(median(y - lambda.init)),
+ sdlog = rep(theta2eta(sdlog.y.est, .lsdlog, earg = .esdlog),
+ length = n),
+ lambda = lambda.init)
+ }
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog,
+ .delta = delta, .powers.try = powers.try ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ lambda + exp(mymu + 0.5 * sdlog^2)
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ last = eval(substitute(expression({
+ misc$link = c("meanlog" = .lmulog,
+ "sdlog" = .lsdlog,
+ "lambda" = "identity")
+
+ misc$earg = list("meanlog" = .emulog,
+ "sdlog" = .esdlog,
+ "lambda" = list())
+
+ misc$expected = TRUE
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ mymu = eta2theta(eta[, 1], .lmulog , earg = .emulog)
+ sdlog = eta2theta(eta[, 2], .lsdlog , earg = .esdlog)
+ lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+ if (any(y < lambda))
+ warning("bad 'y'")
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dlnorm(y - lambda, meanlog = mymu,
+ sdlog = sdlog, log = TRUE))
+ }
+ }, list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ vfamily = c("lognormal3"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta[, 1], .lmulog, earg = .emulog)
+ sdlog = eta2theta(eta[, 2], .lsdlog, earg = .esdlog)
+ lambda = eta2theta(eta[, 3], "identity", earg = list(theta = NULL))
+
+ if (any(y < lambda))
+ warning("bad 'y'")
+
+ dl.dmymu <- (log(y-lambda)-mymu) / sdlog^2
+ dl.dsdlog <- -1/sdlog + (log(y-lambda)-mymu)^2 / sdlog^3
+ dl.dlambda <- (1 + (log(y-lambda)-mymu) / sdlog^2) / (y-lambda)
+
+ dmymu.deta <- dtheta.deta(mymu, .lmulog, earg = .emulog)
+ dsdlog.deta <- dtheta.deta(sdlog, .lsdlog, earg = .esdlog)
+ dlambda.deta <- dtheta.deta(lambda, "identity", earg = list())
+
+ c(w) * cbind(dl.dmymu * dmymu.deta,
+ dl.dsdlog * dsdlog.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lmulog = lmulog, .lsdlog = lsdlog,
+ .emulog = emulog, .esdlog = esdlog ))),
+ weight = expression({
+ wz <- matrix(0, n, dimm(M))
+
+ ned2l.dmymu2 <- 1 / sdlog^2
+ ned2l.dsdlog <- 2 / sdlog^2
+ temp9 <- exp(-mymu + sdlog^2 / 2)
+ ned2l.dlambda2 <- exp(2*(-mymu+sdlog^2)) * (1+sdlog^2) / sdlog^2
+
+ wz[, iam(1, 1, M)] <- ned2l.dmymu2 * dmymu.deta^2
+ wz[, iam(2, 2, M)] <- ned2l.dsdlog * dsdlog.deta^2
+ wz[, iam(3, 3, M)] <- ned2l.dlambda2 * dlambda.deta^2
+ wz[, iam(1, 3, M)] <- temp9 * dmymu.deta * dlambda.deta / sdlog^2
+ wz[, iam(2, 3, M)] <- -2 * temp9 / sdlog * dsdlog.deta * dlambda.deta
+ wz <- c(w) * wz
+ wz
+ }))
}
-dsnorm = function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
+dsnorm <- function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
- zedd = (x - location) / scale
- loglik = log(2) + dnorm(zedd, log = TRUE) +
+ zedd <- (x - location) / scale
+ loglik <- log(2) + dnorm(zedd, log = TRUE) +
pnorm(shape * zedd, log.p = TRUE) -
log(scale)
if (log.arg) {
@@ -2046,7 +2724,7 @@ dsnorm = function(x, location = 0, scale = 1, shape = 0, log = FALSE) {
-rsnorm = function(n, location = 0, scale = 1, shape=0) {
+rsnorm <- function(n, location = 0, scale = 1, shape=0) {
if (!is.Numeric(n, positive = TRUE,
integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
@@ -2055,113 +2733,136 @@ rsnorm = function(n, location = 0, scale = 1, shape=0) {
if (!is.Numeric(shape))
stop("bad input for argument 'shape'")
- rho = shape / sqrt(1 + shape^2)
- u0 = rnorm(n)
- v = rnorm(n)
- u1 = rho*u0 + sqrt(1 - rho^2) * v
+ rho <- shape / sqrt(1 + shape^2)
+ u0 <- rnorm(n)
+ v <- rnorm(n)
+ u1 <- rho*u0 + sqrt(1 - rho^2) * v
location + scale * ifelse(u0 >= 0, u1, -u1)
}
- skewnormal1 = function(lshape = "identity", earg = list(), ishape = NULL,
- nsimEIM = NULL)
+ skewnormal1 <- function(lshape = "identity",
+ ishape = NULL,
+ nsimEIM = NULL)
{
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (!is.list(earg)) earg = list()
- if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 10))
- stop("argument 'nsimEIM' should be an integer greater than 10")
- new("vglmff",
- blurb = c("1-parameter Skew-normal distribution\n\n",
- "Link: ",
- namesof("shape", lshape, earg = earg), "\n",
- "Mean: shape * sqrt(2 / (pi * (1+shape^2 )))\n",
- "Variance: 1-mu^2"),
- infos = eval(substitute(function(...) {
- list(Musual = 1,
- nsimEIM = .nsimEIM)
- }, list( .nsimEIM = nsimEIM ))),
- initialize = eval(substitute(expression({
- y = cbind(y)
- if (ncol(y) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- namesof("shape", .lshape, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- init.shape = if (length( .ishape))
- rep( .ishape, len = n) else {
- temp = y
- index = abs(y) < sqrt(2/pi)-0.01
- temp[!index] = y[!index]
- temp[index] = sign(y[index])/sqrt(2/(pi*y[index]*y[index])-1)
- temp
- }
- etastart = matrix(init.shape, n, ncol(y))
- }
- }), list( .lshape = lshape, .earg = earg, .ishape = ishape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .lshape, earg = .earg)
- alpha * sqrt(2/(pi * (1+alpha^2 )))
- }, list( .earg = earg, .lshape = lshape ))),
- last = eval(substitute(expression({
- misc$link = c(shape = .lshape)
- misc$earg = list(shape = .earg )
- misc$nsimEIM = .nsimEIM
- misc$expected = (length( .nsimEIM ) > 0)
- }), list( .earg = earg, .lshape = lshape, .nsimEIM = nsimEIM ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- alpha = mu / sqrt(2/pi - mu^2)
- theta2eta(alpha, .lshape, earg = .earg)
- }, list( .earg = earg, .lshape = lshape ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .lshape, earg = .earg)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dsnorm(x = y, location = 0, scale = 1,
- shape = alpha, log = TRUE))
- }
- }, list( .earg = earg, .lshape = lshape ))),
- vfamily = c("skewnormal1"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .lshape, earg = .earg)
- zedd = y*alpha
- tmp76 = pnorm(zedd)
- tmp86 = dnorm(zedd)
- dl.dshape = tmp86 * y / tmp76
- dshape.deta = dtheta.deta(alpha, .lshape, earg = .earg)
- w * dl.dshape * dshape.deta
- }), list( .earg = earg, .lshape = lshape ))),
- weight = eval(substitute(expression({
- if ( length( .nsimEIM )) {
- run.mean = 0
- for(ii in 1:( .nsimEIM)) {
- ysim = rsnorm(n, location = 0, scale = 1, shape = alpha)
- zedd = ysim*alpha
- tmp76 = pnorm(zedd)
- tmp86 = dnorm(zedd)
- d2l.dshape2 = -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
- rm(ysim)
- run.mean = ((ii-1) * run.mean + d2l.dshape2) / ii
- }
- if (intercept.only)
- run.mean = mean(run.mean)
- wz = -w * (dshape.deta^2) * run.mean
- } else {
- d2shape.deta2 = d2theta.deta2(alpha, .lshape, earg = .earg)
- d2l.dshape2 = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
- wz = -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
- wz = c(w) * wz
- }
- wz
- }), list( .earg = earg, .lshape = lshape, .nsimEIM = nsimEIM ))))
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+ if (length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10))
+ stop("argument 'nsimEIM' should be an integer greater than 10")
+
+
+ new("vglmff",
+ blurb = c("1-parameter skew-normal distribution\n\n",
+ "Link: ",
+ namesof("shape", lshape , earg = eshape ), "\n",
+ "Mean: shape * sqrt(2 / (pi * (1 + shape^2 )))\n",
+ "Variance: 1-mu^2"),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ nsimEIM = .nsimEIM)
+ }, list( .nsimEIM = nsimEIM ))),
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ namesof("shape", .lshape , earg = .eshape , tag = FALSE)
+
+ if (!length(etastart)) {
+ init.shape <- if (length( .ishape))
+ rep( .ishape, len = n) else {
+ temp <- y
+ index <- abs(y) < sqrt(2/pi)-0.01
+ temp[!index] <- y[!index]
+ temp[index] <- sign(y[index]) / sqrt(2/(pi*y[index]*y[index])-1)
+ temp
+ }
+ etastart <- matrix(init.shape, n, ncol(y))
+ }
+ }), list( .lshape = lshape, .eshape = eshape,
+ .ishape = ishape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha <- eta2theta(eta, .lshape, earg = .eshape)
+ alpha * sqrt(2/(pi * (1+alpha^2 )))
+ }, list( .eshape = eshape, .lshape = lshape ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape = .lshape)
+
+ misc$earg <- list(shape = .eshape )
+
+ misc$nsimEIM = .nsimEIM
+ misc$expected <- (length( .nsimEIM ) > 0)
+ }), list( .eshape = eshape, .lshape = lshape,
+ .nsimEIM = nsimEIM ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ alpha <- mu / sqrt(2/pi - mu^2)
+ theta2eta(alpha, .lshape, earg = .eshape)
+ }, list( .eshape = eshape, .lshape = lshape ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha <- eta2theta(eta, .lshape, earg = .eshape)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dsnorm(x = y, location = 0, scale = 1,
+ shape = alpha, log = TRUE))
+ }
+ }, list( .eshape = eshape, .lshape = lshape ))),
+ vfamily = c("skewnormal1"),
+ deriv = eval(substitute(expression({
+ alpha <- eta2theta(eta, .lshape, earg = .eshape)
+
+ zedd <- y*alpha
+ tmp76 <- pnorm(zedd)
+ tmp86 <- dnorm(zedd)
+ dl.dshape <- tmp86 * y / tmp76
+
+ dshape.deta <- dtheta.deta(alpha, .lshape, earg = .eshape)
+
+ c(w) * dl.dshape * dshape.deta
+ }), list( .eshape = eshape, .lshape = lshape ))),
+ weight = eval(substitute(expression({
+ if ( length( .nsimEIM )) {
+ run.mean = 0
+ for(ii in 1:( .nsimEIM)) {
+ ysim = rsnorm(n, location = 0, scale = 1, shape = alpha)
+ zedd = ysim*alpha
+ tmp76 = pnorm(zedd)
+ tmp86 = dnorm(zedd)
+ d2l.dshape2 = -ysim*ysim*tmp86*(tmp76*zedd+tmp86)/tmp76^2
+ rm(ysim)
+ run.mean = ((ii-1) * run.mean + d2l.dshape2) / ii
+ }
+ if (intercept.only)
+ run.mean = mean(run.mean)
+ wz = -c(w) * (dshape.deta^2) * run.mean
+ } else {
+ d2shape.deta2 = d2theta.deta2(alpha, .lshape, earg = .eshape)
+ d2l.dshape2 = -y*y * tmp86 * (tmp76 * zedd + tmp86) / tmp76^2
+ wz = -(dshape.deta^2) * d2l.dshape2 - d2shape.deta2 * dl.dshape
+ wz = c(w) * wz
+ }
+ wz
+ }), list( .eshape = eshape, .lshape = lshape, .nsimEIM = nsimEIM ))))
}
diff --git a/R/family.others.R b/R/family.others.R
index f739b1a..a1f3ea6 100644
--- a/R/family.others.R
+++ b/R/family.others.R
@@ -1,13 +1,11 @@
-# These functions are Copyright (C) 1998-2012 T. W. Yee All rights reserved.
+# These functions are
+# Copyright (C) 1998-2012 T.W. Yee, University of Auckland.
+# All rights reserved.
+
-# family.others.R
-# This file contains functions written by other people.
-# Last modified:
-# 20110317: (a) James Lauder files.
-# This file is in one part: see (a), and later (b), (c).
@@ -15,27 +13,16 @@
-# ----------------------------------------------------------------------
-# (a) James Lauder code put here.
-# ----------------------------------------------------------------------
-# Edited from james.familyfuncs.2.R on 20110317
-# ----------------------------------------------------------------------
-# 13/12/10; [drpq]exppois() and exppois().
-# reference: Karlis CSDA 53 (2009) pg 894 and
-# Kus CSDA 51 (2007) pg 4497
-# everything functioning except for hypergeometric function
-# (see R package "hypergeo")
-# ref: Kus, section 4.1, pg 4500
-# updated on 22/15/2010
dexppois <- function(x, lambda, betave = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
N <- max(length(x), length(lambda), length(betave))
x <- rep(x, len = N); lambda = rep(lambda, len = N);
betave <- rep(betave, len = N)
@@ -54,8 +41,6 @@ dexppois <- function(x, lambda, betave = 1, log = FALSE) {
}
-# ref: calculated from F(x) from Kus, pg 4499
-# updated and working on 22/15/2010
qexppois<- function(p, lambda, betave = 1) {
ans <- -log(log(p * -(expm1(lambda)) +
exp(lambda)) / lambda) / betave
@@ -67,10 +52,9 @@ qexppois<- function(p, lambda, betave = 1) {
-# ref: Kus, eqn 2, pg 4499
-# Updated on 22/12/2010
pexppois<- function(q, lambda, betave = 1) {
- ans <-(exp(lambda * exp(-betave * q)) - exp(lambda)) / -expm1(lambda)
+ ans <-(exp(lambda * exp(-betave * q)) -
+ exp(lambda)) / -expm1(lambda)
ans[q <= 0] <- 0
ans[(lambda <= 0) | (betave <= 0)] <- NaN
ans
@@ -78,8 +62,6 @@ pexppois<- function(q, lambda, betave = 1) {
-# ref: calculated from F(x) from Kus, pg 4499
-# updated and working on 22/15/2010
rexppois <- function(n, lambda, betave = 1) {
ans <- -log(log(runif(n) * -(expm1(lambda)) +
exp(lambda)) / lambda) / betave
@@ -92,112 +74,101 @@ rexppois <- function(n, lambda, betave = 1) {
-###################
-# the family function
-# reference: Karlis CSDA 53 (2009) pg 894 and
-# Kus CSDA 51 (2007) pg 4497
-#
-# Notes:
-# 1. Requires the \pkg{hypergeo} package
-# (to use their \code{\link[hypergeo]{genhypergeo}} function).
- exppoisson = function (llambda = "loge", lbetave = "loge",
- elambda = list(), ebetave = list(),
+ exppoisson <- function(llambda = "loge", lbetave = "loge",
ilambda = 1.1, ibetave = 2.0,
zero = NULL) {
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (mode(lbetave) != "character" && mode(lbetave) != "name")
- lbetave = as.character(substitute(lbetave))
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+ lbetave <- as.list(substitute(lbetave))
+ ebetave <- link2list(lbetave)
+ lbetave <- attr(ebetave, "function.name")
+
+
+
+
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (length(ilambda) && !is.Numeric(ilambda, positive = TRUE))
+
+ if (length(ilambda) &&
+ !is.Numeric(ilambda, positive = TRUE))
stop("bad input for argument 'ilambda'")
- if (length(ibetave) && !is.Numeric(ibetave, positive = TRUE))
+ if (length(ibetave) &&
+ !is.Numeric(ibetave, positive = TRUE))
stop("bad input for argument 'ibetave'")
ilambda[abs(ilambda - 1) < 0.01] = 1.1
- if (!is.list(ebetave))
- ebetave = list()
- if (!is.list(elambda))
- elambda = list()
-#print("hi4, 20110319")
+
new("vglmff",
blurb = c("Exponential Poisson distribution \n \n",
"Links: ",
namesof("lambda", llambda, earg = elambda), ", ",
namesof("betave", lbetave, earg = ebetave), "\n",
"Mean: lambda/(expm1(lambda) * betave)) * ",
- "genhypergeo(c(1,1),c(2,2),lambda)"),
+ "genhypergeo(c(1, 1),c(2, 2),lambda)"),
-# genhypergeo() from package: hypergeo
-# ref = mean from Kus pg 4499
constraints = eval(substitute(expression({
constraints = cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
predictors.names = c(
namesof("lambda", .llambda, earg = .elambda, short = TRUE),
namesof("betave", .lbetave, earg = .ebetave, short = TRUE))
if (!length(etastart)) {
-#MLE for lambda from Kus eqn(6) pg 4500
betave.init = if (length( .ibetave ))
- rep( .ibetave , len = n) else
- stop("Need to input a value into argument 'ibetave'")
- ## (lambda.init/(expm1(lambda.init) * (y + 1/8))) *
- ## genhypergeo(c(1,1),c(2,2),lambda.init)
+ rep( .ibetave , len = n) else
+ stop("Need to input a value into argument 'ibetave'")
lambda.init = if (length( .ilambda ))
rep( .ilambda , len = n) else
(1/betave.init - mean(y)) / ((y *
exp(-betave.init * y))/n)
-# supply inital values for now to get function working
betave.init = rep(weighted.mean(betave.init, w = w), len = n)
-#print("head(lambda.init)")
-#print( head(lambda.init) )
-#print("head(betave.init)")
-#print( head(betave.init) )
etastart = cbind(theta2eta(lambda.init, .llambda ,earg = .elambda ),
theta2eta(betave.init, .lbetave ,earg = .ebetave ))
-#print("head(etastart, 3)")
-#print( head(etastart, 3) )
}
- }), list( .llambda = llambda, .lbetave = lbetave,
- .ilambda = ilambda, .ibetave = ibetave,
- .elambda = elambda, .ebetave = ebetave))),
+ }), list( .llambda = llambda, .lbetave = lbetave,
+ .ilambda = ilambda, .ibetave = ibetave,
+ .elambda = elambda, .ebetave = ebetave))),
linkinv = eval(substitute(function(eta, extra = NULL) {
lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
-# warning("returning dud means")
-# mu
-# runif(nrow(eta))
-# 20110319; not sure about the following:
-# 20110319; and not in .Rd file.
- -lambda * genhypergeo(c(1, 1), c(2, 2), lambda) / (expm1(-lambda) *
+ -lambda * genhypergeo(c(1, 1), c(2, 2), lambda) / (expm1(-lambda) *
betave)
}, list( .llambda = llambda, .lbetave = lbetave,
.elambda = elambda, .ebetave = ebetave))),
last = eval(substitute(expression({
misc$link = c(lambda = .llambda , betave = .lbetave )
+
misc$earg = list(lambda = .elambda , betave = .ebetave )
+
misc$expected = TRUE
-
+ misc$multipleResponses <- FALSE
}), list( .llambda = llambda, .lbetave = lbetave,
.elambda = elambda, .ebetave = ebetave))),
@@ -207,15 +178,14 @@ rexppois <- function(n, lambda, betave = 1) {
betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dexppois(x = y, lambda = lambda, betave = betave,
- log = TRUE))
+ sum(c(w) * dexppois(x = y, lambda = lambda, betave = betave,
+ log = TRUE))
}
}, list( .lbetave = lbetave , .llambda = llambda ,
.elambda = elambda , .ebetave = ebetave ))),
vfamily = c("exppoisson"),
-# Updated 22/12/2010
deriv = eval(substitute(expression({
lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
betave = eta2theta(eta[, 2], .lbetave , earg = .ebetave )
@@ -223,33 +193,30 @@ rexppois <- function(n, lambda, betave = 1) {
dl.dlambda = 1/lambda - 1/expm1(lambda) - 1 + exp(-betave * y)
dbetave.deta = dtheta.deta(betave, .lbetave , earg = .ebetave )
dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
- c(w) * cbind(dl.dlambda * dlambda.deta, dl.dbetave * dbetave.deta)
+ c(w) * cbind(dl.dlambda * dlambda.deta,
+ dl.dbetave * dbetave.deta)
}), list( .llambda = llambda, .lbetave = lbetave,
.elambda = elambda, .ebetave = ebetave ))),
weight = eval(substitute(expression({
-# Updated 22/12/2010
temp1 = -expm1(-lambda)
-# Ref: Kus, pg 4502, J11
- ed2l.dlambda2 = (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
+ ned2l.dlambda2 = (1 + exp(2 * lambda) - lambda^2 * exp(lambda) - 2 *
exp(lambda)) / (lambda * temp1)^2
-# Ref: Kus, pg 4502, J22
- ed2l.dbetave2 = 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 *
+ ned2l.dbetave2 = 1 / betave^2 - (lambda^2 * exp(-lambda) / (4 *
betave^2 * temp1)) *
- genhypergeo(c(2,2,2),c(3,3,3),lambda)
+ genhypergeo(c(2, 2, 2),c(3, 3, 3),lambda)
-# Ref: Kus, pg 4502,J12
- ed2l.dbetavelambda = (lambda * exp(-lambda) / (4 * betave * temp1)) *
- genhypergeo(c(2,2),c(3,3),lambda)
+ ned2l.dbetavelambda = (lambda * exp(-lambda) / (4 * betave * temp1)) *
+ genhypergeo(c(2, 2),c(3, 3),lambda)
wz <- matrix(0, n, dimm(M))
- wz[, iam(1, 1, M)] = dlambda.deta^2 * ed2l.dlambda2
- wz[, iam(2, 2, M)] = dbetave.deta^2 * ed2l.dbetave2
- wz[, iam(1, 2, M)] = dbetave.deta * dlambda.deta * ed2l.dbetavelambda
+ wz[, iam(1, 1, M)] = dlambda.deta^2 * ned2l.dlambda2
+ wz[, iam(2, 2, M)] = dbetave.deta^2 * ned2l.dbetave2
+ wz[, iam(1, 2, M)] = dbetave.deta * dlambda.deta * ned2l.dbetavelambda
c(w) * wz
}), list( .zero = zero ))))
}
@@ -259,26 +226,17 @@ rexppois <- function(n, lambda, betave = 1) {
-#=======================================================================
-# 14/12/10 [drpq]genray() and genrayleigh().
-# References: Kundu and Raqab, CSDA 49 (2005) pg 187 and
-# Raqab and Kundu "Burr Type X Distribution Revisited"
-# Updated by Thomas 10/01/2011
-# Notes:
-# 1. scale = 1 / \lambda here, = \delta, say.
-# 2. My calculations showed EIM_{12} did not agree with Kundu and
-# Raqab, (2005). So am using nsimEIM.
-# Ref: Kundu pg 188
-# Updated 22/12/10
dgenray <- function(x, shape, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
+
N <- max(length(x), length(shape), length(scale))
x <- rep(x, len = N)
shape <- rep(shape, len = N)
@@ -300,8 +258,6 @@ dgenray <- function(x, shape, scale = 1, log = FALSE) {
}
-# Ref: Kundu pg 188
-# Updated 22/12/10
pgenray <- function(q, shape, scale = 1) {
ans <- (-expm1(-(q/scale)^2))^shape
ans[q <= 0] <- 0
@@ -310,8 +266,6 @@ pgenray <- function(q, shape, scale = 1) {
}
-# Ref: Kundu pg 193
-# Updated 22/12/10
qgenray <- function(p, shape, scale = 1) {
ans <- scale * sqrt(-log1p(-(p^(1/shape))))
ans[(shape <= 0) | (scale <= 0)] = NaN
@@ -325,8 +279,6 @@ qgenray <- function(p, shape, scale = 1) {
-# Ref: Kundu pg 193
-# Updated 22/12/10
rgenray <- function(n, shape, scale = 1) {
ans <- qgenray(runif(n), shape = shape, scale = scale)
ans[(shape <= 0) | (scale <= 0)] <- NaN
@@ -335,44 +287,40 @@ rgenray <- function(n, shape, scale = 1) {
-###################
-# The family function
-# References: Kundu CSDA 49 (2005) pg 187 & Raqab and
-# Kundu "Burr Type X Distribution Revisited"
-# updated 05/01/2011
-# updated by Thomas 10/01/2011
-genrayleigh.control <- function(save.weight = TRUE, ...)
-{
-# Because of nsimEIM in @weight
+genrayleigh.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
- genrayleigh = function (lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
+
+ genrayleigh <- function(lshape = "loge", lscale = "loge",
ishape = NULL, iscale = NULL,
tol12 = 1.0e-05,
nsimEIM = 300, zero = 1) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+ if (length(ishape) &&
+ !is.Numeric(ishape, positive = TRUE))
stop("bad input for argument 'ishape'")
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) || nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 50)
+ stop("argument 'nsimEIM' should be an integer greater than 50")
- if (!is.list(escale))
- escale = list()
- if (!is.list(eshape))
- eshape = list()
new("vglmff",
@@ -385,46 +333,46 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
-
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+
predictors.names = c(
namesof("shape", .lshape , earg = .eshape , short = TRUE),
namesof("scale", .lscale , earg = .escale , short = TRUE))
-# Following getmaxmin method implemented on 06/01/11
if (!length(etastart)) {
- genrayleigh.Loglikfun = function(scale, y, x, w, extraargs) {
+ genrayleigh.Loglikfun <- function(scale, y, x, w, extraargs) {
temp1 <- y / scale
-# Equation (7) from Kundu and Raqab, p.190, which derives from their (2).
-# It gives the MLE of shape, given Scale.
shape = -1 / weighted.mean(log1p(-exp(-temp1^2)), w = w)
- ans <- sum(w * (log(2) + log(shape) + log(y) - 2 * log(scale) -
- temp1^2 + (shape - 1) * log1p(-exp(-temp1^2))))
-#print("c(scale, ans)")
-#print( c(scale, ans) )
+ ans <- sum(c(w) * (log(2) + log(shape) + log(y) -
+ 2 * log(scale) - temp1^2 +
+ (shape - 1) * log1p(-exp(-temp1^2))))
ans
}
-# Note: problems occur if scale values too close to zero:
- scale.grid = seq(0.2 * stats::sd(y), 5 * stats::sd(y), len = 29)
+ scale.grid = seq(0.2 * stats::sd(c(y)),
+ 5.0 * stats::sd(c(y)), len = 29)
scale.init = if (length( .iscale )) .iscale else
getMaxMin(scale.grid, objfun = genrayleigh.Loglikfun,
y = y, x = x, w = w)
-#print("head(scale.init)")
-#print( head(scale.init) )
scale.init = rep(scale.init, length = length(y))
shape.init = if (length( .ishape )) .ishape else
-1 / weighted.mean(log1p(-exp(-(y/scale.init)^2)),
w = w)
-#print("head(shape.init)")
-#print( head(shape.init) )
shape.init = rep(shape.init, length = length(y))
etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
theta2eta(scale.init, .lscale, earg = .escale))
-#print(",,,,,,,,,,,,,,,,,,,")
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
@@ -433,17 +381,18 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
linkinv = eval(substitute(function(eta, extra = NULL) {
shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
-# zz yet to do: Find expression for mean
-# Much easier to return the median rather than the mean:
qgenray(p = 0.5, shape = shape, scale = Scale)
}, list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale ))),
last = eval(substitute(expression({
misc$link = c(shape = .lshape , scale = .lscale )
+
misc$earg = list(shape = .eshape , scale = .escale )
+
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
}), list( .lshape = lshape, .lscale = lscale,
.eshape = eshape, .escale = escale,
.nsimEIM = nsimEIM ))),
@@ -453,14 +402,11 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
-#print("head(shape, 3)")
-#print( head(shape, 3) )
-#print("head(Scale, 3)")
-#print( head(Scale, 3) )
if (residuals) stop("loglikelihood residuals",
"not implemented yet") else {
- sum(w * dgenray(x = y, shape = shape, scale = Scale, log = TRUE))
+ sum(c(w) * dgenray(x = y, shape = shape,
+ scale = Scale, log = TRUE))
}
}, list( .lshape = lshape , .lscale = lscale ,
.eshape = eshape , .escale = escale ))),
@@ -470,15 +416,10 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
deriv = eval(substitute(expression({
shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
-#print("head(shape, 3)")
-#print( head(shape, 3) )
-#print("head(Scale, 3)")
-#print( head(Scale, 3) )
dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
dthetas.detas = cbind(dshape.deta, dscale.deta)
-# Note: singularities wrt derivatives at shape==0 and zz:
temp1 <- y / Scale
temp2 <- exp(-temp1^2)
temp3 <- temp1^2 / Scale
@@ -487,25 +428,16 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
dl.dshape = 1/shape + log1p(-temp2)
dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
-# Special fixup:
- dl.dshape[!is.finite(dl.dshape)] = max(dl.dshape[is.finite(dl.dshape)])
+ dl.dshape[!is.finite(dl.dshape)] =
+ max(dl.dshape[is.finite(dl.dshape)])
answer <- c(w) * cbind(dl.dshape, dl.dscale) * dthetas.detas
-#print("summary(answer)")
-#print( summary(answer) )
-#print("head(answer, 3)")
-#print( head(answer, 3) )
answer
}), list( .lshape = lshape , .lscale = lscale,
.eshape = eshape, .escale = escale ))),
weight = eval(substitute(expression({
-# 20110108; I disagree with EIM_{12} of pg 190 of Kundu and Raqab.
-# So am using simulated Fisher scoring.
-# Notes:
-# 1. Inf occurs (albeit infequently) for dl.dshape when ysim is close to 0
-# Special fixup to handle this.
run.varcov = 0
ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
@@ -520,7 +452,6 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
dl.dshape = 1/shape + log1p(-temp2)
dl.dscale = -2 / Scale + AAA * (1 - (shape - 1) * temp2 / BBB)
-# Special fixup:
dl.dshape[!is.finite(dl.dshape)] = max(
dl.dshape[is.finite(dl.dshape)])
@@ -534,12 +465,6 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
matrix(colMeans(run.varcov, na.rm = FALSE),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
wz = wz * dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
-#print("summary(run.varcov)")
-#print( summary(run.varcov) )
-#print("summary(wz)")
-#print( summary(wz) )
-#print("head(wz,3)")
-#print( head(wz,3) )
c(w) * wz
}), list( .lshape = lshape , .lscale = lscale,
.eshape = eshape, .escale = escale,
@@ -552,22 +477,15 @@ genrayleigh.control <- function(save.weight = TRUE, ...)
-#=======================================================================
-# 20/01/10; [drpq]expgeom() and expgeometric().
-# Reference: Adamidis and Loukas, SPL 39 (1998) pg 35--42
-# Notes:
-# Scale is the reciprocal of scale in Adamidis.
-# Updated and working 03/02/2011
-# Ref: Adamidis pg.36
dexpgeom <- function(x, scale = 1, shape, log = FALSE) {
-# 20110201; looks okay.
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
N <- max(length(x), length(scale), length(shape))
x <- rep(x, len = N)
scale <- rep(scale, len = N)
@@ -589,9 +507,7 @@ dexpgeom <- function(x, scale = 1, shape, log = FALSE) {
}
-# Ref: Adamidis p.37, (3.1)
pexpgeom <- function(q, scale = 1, shape) {
-# 20110201; looks okay.
temp1 <- -q / scale
ans <- -expm1(temp1) / (1 - shape * exp(temp1))
ans[q <= 0] <- 0
@@ -601,7 +517,6 @@ pexpgeom <- function(q, scale = 1, shape) {
qexpgeom <- function(p, scale = 1, shape) {
-# 20110201; looks okay.
ans <- (-scale) * log((p - 1) / (p * shape - 1))
ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
ans[p < 0] <- NaN
@@ -620,35 +535,29 @@ rexpgeom <- function(n, scale = 1, shape) {
-#=================================================================
-# Exponential geometric family function.
-# Reference: Adamidis & Loukas, SPL 39 (1998) pg 35--42
-# All derivatives etc copied directly from article
-# Updated and working 03/02/2011
-# Notes:
-# Scale is the reciprocal of scale in Adamidis.
expgeometric.control <- function(save.weight = TRUE, ...)
{
-# Because of nsimEIM in @weight
list(save.weight = save.weight)
}
- expgeometric = function (lscale = "loge", lshape = "logit",
- escale = list(), eshape = list(),
+ expgeometric <- function(lscale = "loge", lshape = "logit",
iscale = NULL, ishape = NULL,
tol12 = 1.0e-05, zero = 1,
nsimEIM = 400) {
-# 20110102; modified by TWYee. Works.
-# Yet to do: get proper Fisher scoring going.
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
if (length(ishape))
if (!is.Numeric(ishape, positive = TRUE) || any(ishape >= 1))
@@ -658,15 +567,13 @@ expgeometric.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(escale))
- escale = list()
- if (!is.list(eshape))
- eshape = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 50)
stop("'nsimEIM' should be an integer greater than 50")
@@ -679,7 +586,6 @@ expgeometric.control <- function(save.weight = TRUE, ...)
namesof("shape", lshape, earg = eshape), "\n",
"Mean: ", "(shape - 1) * log(1 - ",
"shape) / (shape / Scale)"),
-# mean = Adamidis eqn. (3.2)
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
@@ -687,8 +593,19 @@ expgeometric.control <- function(save.weight = TRUE, ...)
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+
+
predictors.names = c(
namesof("Scale", .lscale , earg = .escale , short = TRUE),
@@ -699,31 +616,21 @@ expgeometric.control <- function(save.weight = TRUE, ...)
scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
rep( .iscale , len = n)
} else {
-# The scale parameter should be
-# the standard deviation of y.
- stats::sd(y) # The papers scale parameter beta
+ stats::sd(c(y)) # The papers scale parameter beta
}
-#print("head(scale.init)")
-#print( head(scale.init) )
shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
rep( .ishape , len = n)
} else {
-# Use the formula for the median:
rep(2 - exp(median(y)/scale.init), len = n)
}
-# But avoid extremes:
shape.init[shape.init >= 0.95] = 0.95
shape.init[shape.init <= 0.05] = 0.05
-#print("head(shape.init)")
-#print( head(shape.init) )
etastart = cbind(theta2eta(scale.init, .lscale , earg = .escale ),
theta2eta(shape.init, .lshape , earg = .eshape ))
-#print("head(etastart, 3)")
-#print( head(etastart, 3) )
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
@@ -733,7 +640,6 @@ expgeometric.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-# Return the mean as fitted value; Adamidis Equation (3.2)
(shape - 1) * log1p(-shape) / (shape / Scale)
}, list( .lscale = lscale, .lshape = lshape,
@@ -741,9 +647,12 @@ expgeometric.control <- function(save.weight = TRUE, ...)
last = eval(substitute(expression({
misc$link = c(Scale = .lscale , shape = .lshape )
+
misc$earg = list(Scale = .escale , shape = .eshape )
+
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
@@ -753,14 +662,11 @@ expgeometric.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-#print("head(shape, 3)")
-#print( head(shape, 3) )
-#print("head(Scale, 3)")
-#print( head(Scale, 3) )
if (residuals) stop("loglikelihood residuals",
"not implemented yet") else {
- sum(w * dexpgeom(x = y, scale = Scale, shape = shape, log = TRUE))
+ sum(c(w) * dexpgeom(x = y, scale = Scale, shape = shape,
+ log = TRUE))
}
}, list( .lscale = lscale , .lshape = lshape ,
.escale = escale , .eshape = eshape ))),
@@ -771,7 +677,6 @@ expgeometric.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-# JGL calculated:
temp2 <- exp(-y / Scale)
temp3 <- shape * temp2
temp4 <- y / Scale^2
@@ -783,54 +688,27 @@ expgeometric.control <- function(save.weight = TRUE, ...)
dthetas.detas = cbind(dscale.deta, dshape.deta)
answer <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
-#print("summary(answer)")
-#print( summary(answer) )
-#print("head(answer, 3)")
-#print( head(answer, 3) )
answer
}), list( .lscale = lscale , .lshape = lshape,
.escale = escale, .eshape = eshape ))),
-#######################
weight = eval(substitute(expression({
-#EIM copied exactly as Adamidis article page 40
-# Yet to do: get this proper Fisher scoring going.
-# gls package function "dilog()" used for polylog function..check up
-# on this.
-# if (FALSE) {
-# ed2l.dscale2 = (3 * shape - 2 * (shape - (1 - shape) *
-# (gsl::dilog(shape,2)$val))) / (3 * Scale^2 * shape)
-# ed2l.dshape2 = (1 - shape)^(-2) / 3
-# ed2l.dscaleshape = (4 * shape^2 - shape + (1 - shape)^2 *
-# log1p(-shape)) / (3 * Scale * shape^2 * (1 - shape))
-# wz <- matrix(0, n, dimm(M))
-# wz[, iam(1, 1, M)] = dscale.deta^2 * ed2l.dscale2
-# wz[, iam(2, 2, M)] = dshape.deta^2 * ed2l.dshape2
-# wz[, iam(1, 2, M)] = dscale.deta * dshape.deta * ed2l.dscaleshape
-# c(w) * wz
-# }
-# 5/10/07: Use simulation to estimate the EIM
-# Use an updating formula for the mean and variance
-# Ref.: Hastie and Tibshirani, 1990, GAM book, p.35.
-# Here, the variance has 'n' in denominator, not 'n-1'.
run.varcov = 0
ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
-# Simulated FS used only if nsimEIM was specified.
for(ii in 1:( .nsimEIM )) {
ysim = rexpgeom(n, scale=Scale, shape=shape)
-# Now compute some quantities
temp2 <- exp(-ysim / Scale)
temp3 <- shape * temp2
temp4 <- ysim / Scale^2
@@ -840,27 +718,19 @@ expgeometric.control <- function(save.weight = TRUE, ...)
2 * temp2 / (1 - temp3)
temp6 = cbind(dl.dscale, dl.dshape)
-#print("temp6[1:3,]")
-#print( temp6[1:3,] )
run.varcov = run.varcov +
- temp6[,ind1$row.index] * temp6[,ind1$col.index]
+ temp6[,ind1$row.index] * temp6[,ind1$col.index]
}
run.varcov = run.varcov / .nsimEIM
-# Can do even better if it is an intercept-only model
wz = if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
-#print("wz[1:3,]")
-#print( wz[1:3,] )
wz = wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
-#print("using simulation")
}
-#print("wz[1:3,]")
-#print( wz[1:3,] )
c(w) * wz
}), list( .nsimEIM = nsimEIM ))))
@@ -871,20 +741,15 @@ expgeometric.control <- function(save.weight = TRUE, ...)
-#=======================================================================
-# 16/02/10; [drpq]explog() and explogarithmic().
-# Reference: Tahmasabi and Rezaei, CSDA 52 (2008) pg 3889--3901
-# Notes:
-# Scale is the reciprocal of scale in Tahmasabi.
-# Ref: Tahmasabi pg.3890
dexplog <- function(x, scale = 1, shape, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
N <- max(length(x), length(scale), length(shape))
x <- rep(x, len = N)
scale <- rep(scale, len = N)
@@ -907,7 +772,6 @@ dexplog <- function(x, scale = 1, shape, log = FALSE) {
}
-# Ref: Tahmasabi pg. 3890
pexplog <- function(q, scale = 1, shape) {
ans <- 1 - log1p(-(1-shape) * exp(-q / scale)) / log(shape)
ans[q <= 0] <- 0
@@ -917,14 +781,9 @@ pexplog <- function(q, scale = 1, shape) {
-#ref: Tahmasabi pg. 3892
-# 20110319; this was wrong. Corrected by TWY.
qexplog <- function(p, scale = 1, shape) {
-# orig is wrong:
-# ans <- scale * log((1 - shape) / (1 - shape^p))
-# 20110319, twy picked up an error:
ans <- -scale * (log1p(-shape^(1.0 - p)) - log1p(-shape))
ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
@@ -937,7 +796,6 @@ qexplog <- function(p, scale = 1, shape) {
-#ref: Tahmasabi pg. 3892
rexplog <- function(n, scale = 1, shape) {
ans <- qexplog(runif(n), scale = scale, shape = shape)
ans[(scale <= 0) | (shape <= 0) | (shape >= 1)] <- NaN
@@ -949,51 +807,48 @@ rexplog <- function(n, scale = 1, shape) {
-#=================================================================
-# Exponential logarithmic.
-# Reference: Tahmasbi and Rezaei, CSDA 52 (2008) pg 3889--3901
-# Notes:
-# Scale is the reciprocal of scale in Tahmasabi.
-#updated and working 27/02/11
explogarithmic.control <- function(save.weight = TRUE, ...)
{
-# Because of nsimEIM in @weight
list(save.weight = save.weight)
}
- explogarithmic = function (lscale = "loge", lshape = "logit",
- escale = list(), eshape = list(),
+ explogarithmic <- function(lscale = "loge", lshape = "logit",
iscale = NULL, ishape = NULL,
tol12 = 1.0e-05, zero = 1,
nsimEIM = 400) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
if (length(ishape))
- if (!is.Numeric(ishape, positive = TRUE) || any(ishape >= 1))
+ if (!is.Numeric(ishape, positive = TRUE) ||
+ any(ishape >= 1))
stop("bad input for argument 'ishape'")
if (length(iscale))
if (!is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE,
+ positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(escale))
- escale = list()
- if (!is.list(eshape))
- eshape = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 50)
- stop("'nsimEIM' should be an integer greater than 50")
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+
new("vglmff",
blurb = c("Exponential logarithmic distribution\n\n",
@@ -1001,15 +856,21 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
namesof("Scale", lscale, earg = escale), ", ",
namesof("shape", lshape, earg = eshape), "\n",
"Mean: ", "(-polylog(2, 1 - p) * Scale) / log(shape)"),
-# mean = Tahmabasi pg. 3891
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
predictors.names = c(
namesof("Scale", .lscale , earg = .escale , short = TRUE),
@@ -1020,31 +881,22 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
scale.init = if (is.Numeric( .iscale , positive = TRUE)) {
rep( .iscale , len = n)
} else {
-# The scale parameter should be
-# the standard deviation of y.
- stats::sd(y)
+ stats::sd(c(y))
}
shape.init = if (is.Numeric( .ishape , positive = TRUE)) {
rep( .ishape , len = n)
} else {
-# Use the formula for the median (Tahmasabi pg. 3891):
rep((exp(median(y)/scale.init) - 1)^2, len = n)
}
-# But avoid extremes:
shape.init[shape.init >= 0.95] = 0.95
shape.init[shape.init <= 0.05] = 0.05
-#print("head(scale.init)")
-#print( head(scale.init) )
-#print("head(shape.init)")
-#print( head(shape.init) )
- etastart = cbind(theta2eta(scale.init, .lscale , earg = .escale ),
- theta2eta(shape.init, .lshape , earg = .eshape ))
+ etastart =
+ cbind(theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(shape.init, .lshape , earg = .eshape ))
-#print("head(etastart, 3)")
-#print( head(etastart, 3) )
}
}), list( .lscale = lscale, .lshape = lshape,
.iscale = iscale, .ishape = ishape,
@@ -1054,13 +906,7 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-# warning("returning dud means")
-# runif(nrow(eta))
-# zz yet to do: Find polylog function
-# mean should be the fitted value; Tahmasabi pg. 3891
-# (-polylog(2, 1 - p) * Scale) / log(shape)
-# mean contains polylog function therefore return median for now:
qexplog(p = 0.5, shape = shape, scale = Scale)
@@ -1069,9 +915,12 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
last = eval(substitute(expression({
misc$link = c(Scale = .lscale , shape = .lshape )
+
misc$earg = list(Scale = .escale , shape = .eshape )
+
misc$expected = TRUE
misc$nsimEIM = .nsimEIM
+ misc$multipleResponses <- FALSE
}), list( .lscale = lscale, .lshape = lshape,
.escale = escale, .eshape = eshape,
.nsimEIM = nsimEIM ))),
@@ -1082,14 +931,11 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-#print("head(shape, 3)")
-#print( head(shape, 3) )
-#print("head(Scale, 3)")
-#print( head(Scale, 3) )
if (residuals) stop("loglikelihood residuals",
"not implemented yet") else {
- sum(w * dexplog(x = y, scale = Scale, shape = shape, log = TRUE))
+ sum(c(w) * dexplog(x = y, scale = Scale,
+ shape = shape, log = TRUE))
}
}, list( .lscale = lscale , .lshape = lshape ,
.escale = escale , .eshape = eshape ))),
@@ -1100,7 +946,6 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
-# JGL calculated:
temp2 <- exp(-y / Scale)
temp3 <- y / Scale^2
temp4 <- 1 - shape
@@ -1114,29 +959,21 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
dthetas.detas = cbind(dscale.deta, dshape.deta)
answer <- c(w) * cbind(dl.dscale, dl.dshape) * dthetas.detas
-#print("summary(answer)")
-#print( summary(answer) )
-#print("head(answer, 3)")
-#print( head(answer, 3) )
answer
}), list( .lscale = lscale , .lshape = lshape,
.escale = escale, .eshape = eshape ))),
-#######################
weight = eval(substitute(expression({
-# 5/10/07: Use simulation to estimate the EIM
run.varcov = 0
ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
if (length( .nsimEIM )) {
-# Simulated FS used only if nsimEIM was specified.
for(ii in 1:( .nsimEIM )) {
ysim = rexplog(n, scale=Scale, shape=shape)
-# Now compute some quantities
temp2 <- exp(-ysim / Scale)
temp3 <- ysim / Scale^2
temp4 <- 1 - shape
@@ -1146,27 +983,20 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
temp2 / (1 - temp4 * temp2)
temp6 = cbind(dl.dscale, dl.dshape)
-#print("temp6[1:3,]")
-#print( temp6[1:3,] )
run.varcov = run.varcov +
- temp6[,ind1$row.index] * temp6[,ind1$col.index]
+ temp6[,ind1$row.index] *
+ temp6[,ind1$col.index]
}
run.varcov = run.varcov / .nsimEIM
-# Can do even better if it is an intercept-only model
wz = if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
-#print("wz[1:3,]")
-#print( wz[1:3,] )
wz = wz * dthetas.detas[, ind1$row] *
dthetas.detas[, ind1$col]
-#print("using simulation")
}
-#print("wz[1:3,]")
-#print( wz[1:3,] )
c(w) * wz
}), list( .nsimEIM = nsimEIM ))))
@@ -1178,371 +1008,51 @@ explogarithmic.control <- function(save.weight = TRUE, ...)
-#=======================================================================
-# 09/02/10; [drpq]weibull3()
-# Reference "The Weibull distribution - A Handbook" by Horst Rinne
-# 20110319; withdrawing [dpqrt]weibull3() due to regularity conditions not
-# being met.
-#Ref: pg. 30
-#working 10/02/2010
-dweibull3 <- function(x, location = 0, scale = 1, shape, log = FALSE) {
+dweibull3 <- function(x, location = 0, scale = 1, shape,
+ log = FALSE) {
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
- log.arg = log
- rm(log)
- dweibull(x = x - location, shape = shape, scale = scale, log = log.arg)
+ dweibull(x = x - location, shape = shape,
+ scale = scale, log = log.arg)
}
-# Ref: pg 43
-# working 10/02/2010
pweibull3 <- function(q, location = 0, scale = 1, shape) {
pweibull(q = q - location, scale = scale, shape = shape)
}
-# Ref: pg 68
-# updated and working 18/02/2010
qweibull3 <- function(p, location = 0, scale = 1, shape) {
location + qweibull(p = p, shape = shape, scale = scale)
}
-# Ref: pg 68
-# working 11/02/2010
rweibull3 <- function(n, location = 0, scale = 1, shape) {
location + rweibull(n = n, shape = shape, scale = scale)
}
-#=====================================
-# 3-parameter Weibull function
-# 07/02/2011
-
-# This code is based on the 2-parameter Weibull function weibull()
-# Does not accomodate censoring yet.
-# Reference "The Weibull distribution - A Handbook" by Horst Rinne
-
-if (FALSE)
- weibull3 = function(llocation = "identity", lscale = "loge",
- lshape = "loge", elocation = list(),
- escale = list(), eshape = list(),
- ilocation = NULL, iscale = NULL, ishape = NULL,
- imethod = 1, zero = c(2, 3))
-{
-
- llocat = llocation
- elocat = elocation
- ilocat = ilocation
-
- if (mode(llocat) != "character" && mode(llocat) != "name")
- llocat = as.character(substitute(llocat))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
-
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1, 2 or 3")
-
- if (!is.list(elocat)) elocat = list()
- if (!is.list(eshape)) eshape = list()
- if (!is.list(escale)) escale = list()
-
- new("vglmff",
- blurb = c("3-parameter Weibull distribution\n\n",
- "Links: ",
- namesof("location", llocat, earg = elocat), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n",
- "Mean: location + scale * gamma(1 + 1/shape)\n",
- "Variance: scale^2 * (gamma(1 + 2/shape) - ",
- "gamma(1 + 1/shape)^2)"),
-#Ref: Rinne (Mean - pg 77 eqn. 2.64b; Var - pg 89 eqn. 2.88a)
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
-
- initialize = eval(substitute(expression({
- y = cbind(y)
- if (ncol(y) > 1)
- stop("the response must be a vector or a 1-column matrix")
-
- if (is.SurvS4(y))
- stop("only uncensored observations are allowed; don't use SurvS4()")
-
- predictors.names =
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
-
-
- if (!length(etastart)) {
-#Assigning shape.init, scale.init, locat.init
-
- if ( .imethod == 1) {
-# method of moments - Rinne page 464
-# working - 22/02/2011
-
- if(length( .ishape )) {
- shape.init = rep( .ishape , len = n )
- } else {
-# approximating equation for shape
-# eqn (12.10b)
- alpha3 = ((1/n) *(sum((y - mean(y))^3)))/((1/n) * (sum((y -
- mean(y))^2)))^(3/2)
-# eqn (12.10d)
- temp2 = (alpha3 + 1.14)
- shape.init = rep(-0.729268 - 0.338679 * alpha3 + 4.96077 *
- temp2^(-1.0422) + 0.683609 *
- (log(temp2))^2, len = n)
-#valid for (0.52 <= shape.init <= 100)
- }
-
-#eqn (12.9b)
- scale.init = if(length( .iscale )) {
- rep( .iscale , len = n )
- } else {
- rep(stats::sd(y) / sqrt(gamma(1 + 2/shape.init) -
- gamma(1 + 1/shape.init)^2) , len = n)
- }
-
-#eqn (12.8b)
- locat.init = if(length( .ilocat )) {
- rep( .ilocat , len = n )
- } else {
- rep(mean(y) - scale.init * gamma(1 + 1/shape.init),
- len = n)
- }
-#location = just below min value if smaller than MOM locat.init
- locat.init = pmin(min(y) - 0.05 * diff(range(y)), locat.init)
- }
- if ( .imethod == 2 || .imethod == 3) {
- #least squares method for scale and shape
- #with two separate methods for locat
-
- #code from weibull (2-parameter) for least squares method
- if (!length( .ishape ) || !length( .iscale )) {
- anyc = FALSE # extra$leftcensored | extra$rightcensored
- i11 = if ( .imethod == 2 || .imethod == 3) anyc else
- FALSE
- # can be all data
- qvec = c(.25, .5, .75) # Arbitrary; could be made an argument
- init.shape = if (length( .ishape )) .ishape else 1
- ###init.shape??? should be shape.init?
- xvec = log(-log1p(-qvec))
- fit0 = lsfit(x = xvec, y=log(quantile(y[!i11], qvec)))
- }
-
- shape.init = rep(if(length( .ishape )) .ishape else
- 1/fit0$coef["X"], len = n)
- scale.init = rep(if(length( .iscale )) .iscale else
- exp(fit0$coef["Intercept"]), len = n)
- locat.init = rep(if(length( .ilocat )) .ilocat else
- if ( .imethod == 2) {
- ifelse(min(y)>0, 0.75, 1.25) * min(y)
- } else {
- min(y) - 0.05 * diff(range(y))
- }
- , len = n)
- }
-#print("min(y)")
-#print( min(y) )
-#print("head(locat.init)")
-#print( head(locat.init) )
-#print("head(scale.init)")
-#print( head(scale.init) )
-#print("head(shape.init)")
-#print( head(shape.init) )
-
- etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat ),
- theta2eta(scale.init, .lscale, earg = .escale ),
- theta2eta(shape.init, .lshape, earg = .eshape ))
-
- print("head(etastart, 3)")
- print( head(etastart, 3) )
-
- }
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape,
- .ilocat = ilocat, .iscale = iscale, .ishape = ishape,
- .imethod = imethod) )),
-
- linkinv = eval(substitute(function(eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat )
- scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
-
-# fitted value = mean (pg.77 eqn. 2.64b)
- locat + scale * gamma(1 + 1/shape)
-
- }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape ) )),
- last = eval(substitute(expression({
-
-# From 2-parameter Weibull code:
- if (regnotok <- any(shape <= 2))
- warning("MLE regularity conditions are violated",
- "(shape <= 2) at the final iteration")
-
-# Putting the MLE warning here good because it could possibly be violated
-# only in the early iterations.
-# Putting the MLE warning here is bad because vcov() gets no warning.
-
- misc$link = c(location = .llocat, scale = .lscale, shape = .lshape)
- misc$earg = list(location = .elocat, scale = .escale, shape = .eshape)
- misc$RegCondOK = !regnotok # Save this for later
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape
- ) )),
-
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat )
- scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
-
-
-# 20110319; Some of this code comes from gev().
- if (any(bad <- (y <= locat))) {
- cat("There are", sum(bad), "range violations in @loglikelihood\n")
- flush.console()
- }
- old.answer =
- sum(bad) * (-1.0e10) + ifelse(any(!bad),
- sum(w[!bad] * dweibull3(x = y[!bad], location = locat[!bad],
- scale = scale[!bad],
- shape = shape[!bad], log = TRUE)), 0)
-
-# ell2 = dweibull3(x = y, location = locat, scale = scale,
-# shape = shape, log = TRUE)
-
-#pg 405 eqn. 11.4b
-
-# temp3 = y - locat
-# ell1 = log(shape) - shape * log(scale) + (shape-1) * log(temp3) -
-# (temp3/scale)^shape
-
-#print("max(abs(ell1 - ell2))")
-#print( max(abs(ell1 - ell2)) )
-
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
-# sum(w * ell2)
- old.answer
- }
- }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape ) )),
- vfamily = c("weibull3"),
- deriv = eval(substitute(expression({
- print("in @deriv")
- print("head(eta, 3)")
- print( head(eta, 3) )
- locat = eta2theta(eta[, 1], .llocat, earg = .elocat )
- scale = eta2theta(eta[, 2], .lscale, earg = .escale )
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape )
-
- dlocat.deta = dtheta.deta(locat, .llocat, earg = .elocat )
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale )
-
-# equations from pg 405
- temp4 = shape / scale
- zedd = (y - locat) / scale
- print("min(zedd)")
- print( min(zedd) )
-
- if (min(zedd) <= 0)
- warning("Boundary problem. Taking evasive action.")
-
- dl.dlocat = (1 - shape) / (y - locat) + temp4 * zedd^(shape - 1)
- dl.dscale = temp4 * (-1 + zedd^shape)
- dl.dshape = 1 / shape + log(abs(zedd)) - log(abs(zedd)) * zedd^shape
-
- c(w) * cbind( dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta,
- dl.dshape * dshape.deta)
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape ) )),
- weight = eval(substitute(expression({
- print("in @weight of weibull3()")
-# EulerM = 0.57721566490153286
- EulerM = -digamma(1.0)
-
- print("head(locat)")
- print( head(locat) )
- print("head(scale)")
- print( head(scale) )
- print("head(shape)")
- print( head(shape) )
-
-
- wz = matrix(as.numeric(NA), n, dimm(M))
-
-# equations involving location parameter from Horst Rinne pg 410
- temp6 = 1 - 1 / shape
- ed2l.dlocat2 = gamma(1 - 2/shape) * ((shape - 1) / scale)^2
-# ed2l.dshape2 = ((1 - EulerM)^2 + (pi^2)/6)/shape^2 # Kleiber&Kotz (2003)
-# ed2l.dshape2: modified from the 2-parameter weibull code:
- ed2l.dscale2 = (shape/scale)^2
- ed2l.dshape2 = (6 * (EulerM - 1)^2 + pi^2)/(6 * shape^2)
- ed2l.dlocatscale = -gamma(2 - 1/shape) * (shape/scale)^2
- ed2l.dlocatshape = -(1/scale) * temp6 * gamma(temp6) *
- (1 + digamma(temp6))
- ed2l.dshapescale = (EulerM - 1) / scale
-
- wz[, iam(1,1,M)] = ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] = ed2l.dscale2 * dscale.deta^2
- wz[, iam(3,3,M)] = ed2l.dshape2 * dshape.deta^2
- wz[, iam(1,2,M)] = ed2l.dlocatscale * dlocat.deta * dscale.deta
- wz[, iam(1,3,M)] = ed2l.dlocatshape * dlocat.deta * dshape.deta
- wz[, iam(2,3,M)] = ed2l.dshapescale * dshape.deta * dscale.deta
-
-# Putting the MLE warning here is bad because could possibly be violated
-# only in the early iterations.
-# Putting MLE warning here is good because vcov() gets another warning.
-
- print("head(wz)")
- print( head(wz) )
-
- wz = c(w) * wz
- wz
- }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
- .elocat = elocat, .escale = escale, .eshape = eshape ))))
-}
-
-# End of James Lauder code here
-#=========================================================================
-# ----------------------------------------------------------------------
-# (b) Arash code
-# 20110615
-# TPN.R
-# ----------------------------------------------------------------------
-# ----------------------------------------------------------------------
### Two-piece normal (TPN) family
-################ dtpn ##################################
dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
log.arg = FALSE) {
-# Reference: Arash handnotes
if (any(skewpar <= 0 |
skewpar >= 1 |
@@ -1550,7 +1060,6 @@ dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
na.rm = TRUE))
stop("some parameters out of bound")
-# Recycle the vectors to equal lengths
LLL = max(length(x), length(location), length(scale),
length(skewpar))
if (length(x) != LLL) x = rep(x, length = LLL)
@@ -1571,7 +1080,6 @@ dtpn <- function(x, location = 0, scale = 1, skewpar = 0.5,
if (log.arg) logdensity else exp(logdensity)
}
-################ ptpn ################################
ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
if (any(skewpar <= 0 |
@@ -1580,12 +1088,12 @@ ptpn <- function(q, location = 0, scale = 1, skewpar = 0.5) {
na.rm = TRUE))
stop("some parameters out of bound")
-# Reference: Arash handnotes
zedd <- (q - location) / scale
s1 <- 2 * skewpar * pnorm(zedd, sd = 2 * skewpar) #/ scale
- s2 <- skewpar + (1 - skewpar) * pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
+ s2 <- skewpar + (1 - skewpar) *
+ pgamma(zedd^2 / (8 * (1-skewpar)^2), 0.5)
ans <- rep(0.0, length(zedd))
ans[zedd <= 0] <- s1[zedd <= 0]
@@ -1596,11 +1104,10 @@ ans
-##################### qtpn ############################################
pos <- function(x) ifelse(x > 0, x, 0.0)
-qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5){
+qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5) {
pp = p
if (any(pp <= 0 |
@@ -1631,319 +1138,346 @@ qtpn <- function(p, location = 0, scale = 1, skewpar = 0.5){
-########### rast ##########################################
rtpn <- function(n, location = 0, scale = 1, skewpar = 0.5) {
- qtpn(p = runif(n), location = location, scale = scale, skewpar = skewpar)
+ qtpn(p = runif(n), location = location,
+ scale = scale, skewpar = skewpar)
}
-### Two-piece normal family function via VGAM
+
+
tpnff <- function(llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
pp = 0.5, method.init = 1, zero = 2)
{
-# Arash : At the moment, I am working on two important(In Quant. Reg.)
-# parameters of the TPN distribution, I am not worry about the skew
-# parameter p.
-# Note : pp = Skewparameter
- if (!is.Numeric(method.init, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ if (!is.Numeric(method.init, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
method.init > 4)
- stop("'imethod' must be 1 or 2 or 3 or 4")
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
if (!is.Numeric(pp, allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'pp'")
+ stop("bad input for argument 'pp'")
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
new("vglmff",
blurb = c("Two-piece normal distribution \n\n",
"Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), "\n\n",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
"Mean: "),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
predictors.names <-
c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
- location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
- if ( .method.init == 3) {
- rep(weighted.mean(y, w), len = n)
- } else if ( .method.init == 2) {
- rep(median(rep(y, w)), len = n)
- } else if ( .method.init == 1) {
- junk$fitted
- } else {
- y
- }
- }
- etastart <- cbind(
- theta2eta(location.init, .llocat, earg = .elocat),
- theta2eta(scale.y.est, .lscale, earg = .escale))
+
+
+
+
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y = y, w = w)
+ scale.y.est <-
+ sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
+ location.init <- if ( .llocat == "loge")
+ pmax(1/1024, y) else {
+
+ if ( .method.init == 3) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .method.init == 2) {
+ rep(median(rep(y, w)), len = n)
+ } else if ( .method.init == 1) {
+ junk$fitted
+ } else {
+ y
+ }
}
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .method.init=method.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocat, earg = .elocat)
- }, list( .llocat = llocation,
- .elocat = elocation, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link <- c("location" = .llocat, "scale" = .lscale)
- misc$earg <- list("location" = .elocat, "scale" = .escale)
- misc$expected <- TRUE
- misc$pp <- .pp
- misc$method.init <- .method.init
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .pp = pp, .method.init = method.init ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- ppay <- .pp
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dtpn(y, skewpar = ppay, location = location, scale = myscale,
- log.arg = TRUE))
- }
- }, list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .pp = pp ))),
- vfamily = c("tpnff"),
- deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- mypp <- .pp
-
- zedd <- (y - mylocat) / myscale
- # cond1 <- (zedd <= 0)
- cond2 <- (zedd > 0)
-
- dl.dlocat <- zedd / (4 * mypp^2) # cond1
- dl.dlocat[cond2] <- (zedd / (4 * (1 - mypp)^2))[cond2]
- dl.dlocat <- dl.dlocat / myscale
-
- dl.dscale <- zedd^2 / (4 * mypp^2)
- dl.dscale[cond2] <- (zedd^2 / (4 * (1 - mypp)^2))[cond2]
- dl.dscale <- (-1 + dl.dscale) / myscale
-
- #dl.dpp <- zedd^2 / (4 * mypp^3)
- #dl.dpp[cond2] <- -zedd^2 / (4 * (1 - mypp)^3)[cond2]
-
+ etastart <- cbind(
+ theta2eta(location.init, .llocat, earg = .elocat),
+ theta2eta(scale.y.est, .lscale, earg = .escale))
+ }
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .method.init=method.init ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat, earg = .elocat)
+ }, list( .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat, "scale" = .lscale)
+ misc$earg <- list("location" = .elocat, "scale" = .escale)
- dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
- dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
- ans <-
- w * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta)
- ans
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
- .pp = pp ))),
- weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-col too
- temp10 <- mypp * (1 - mypp)
- ed2l.dlocat2 <- 1 / ((4 * temp10) * myscale^2)
- ed2l.dscale2 <- 2 / myscale^2
-# ed2l.dskewpar <- 1 / temp10
-# ed2l.dlocatdskewpar <- (-2 * sqrt(2)) / (temp10 * sqrt(pi) * myscale)
-
+ misc$expected <- TRUE
+ misc$pp <- .pp
+ misc$method.init <- .method.init
+ misc$multipleResponses <- FALSE
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .pp = pp, .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ ppay <- .pp
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dtpn(y, skewpar = ppay, location = location,
+ scale = myscale, log.arg = TRUE))
+ }
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .pp = pp ))),
+ vfamily = c("tpnff"),
+ deriv = eval(substitute(expression({
+ mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ mypp <- .pp
- wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
- # wz[, iam(3,3,M)] <- ed2l.dskewpar2 * dskewpa.deta^2
- # wz[, iam(1,3,M)] <- ed2l.dlocatdskewpar * dskewpar.deta * dlocat.deta
- ans
- w * wz
- })
+ zedd <- (y - mylocat) / myscale
+ # cond1 <- (zedd <= 0)
+ cond2 <- (zedd > 0)
+
+ dl.dlocat <- zedd / (4 * mypp^2) # cond1
+ dl.dlocat[cond2] <- (zedd / (4 * (1 - mypp)^2))[cond2]
+ dl.dlocat <- dl.dlocat / myscale
+
+ dl.dscale <- zedd^2 / (4 * mypp^2)
+ dl.dscale[cond2] <- (zedd^2 / (4 * (1 - mypp)^2))[cond2]
+ dl.dscale <- (-1 + dl.dscale) / myscale
+
+ #dl.dpp <- zedd^2 / (4 * mypp^3)
+ #dl.dpp[cond2] <- -zedd^2 / (4 * (1 - mypp)^3)[cond2]
- )))
+ dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+ dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
+
+ ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ ans
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .pp = pp ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, M) # diag matrix; y is one-col too
+ temp10 <- mypp * (1 - mypp)
+ ned2l.dlocat2 <- 1 / ((4 * temp10) * myscale^2)
+ ned2l.dscale2 <- 2 / myscale^2
+
+
+ wz[, iam(1, 1,M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2,M)] <- ned2l.dscale2 * dscale.deta^2
+ # wz[, iam(3, 3,M)] <- ned2l.dskewpar2 * dskewpa.deta^2
+ # wz[, iam(1, 3,M)] <- ned2l.dlocatdskewpar * dskewpar.deta * dlocat.deta
+ ans
+ c(w) * wz
+ }))))
}
########################################################################
-# Two-piece normal family function via VGAM (All 3 parameters will estimate)
-tpnff3 <- function(llocation = "identity", elocation = list(),
- lscale = "loge", escale = list(),
- lskewpar = "identity", eskewpar = list(),
+
+tpnff3 <- function(llocation = "identity",
+ lscale = "loge",
+ lskewpar = "identity",
method.init = 1, zero = 2)
{
if (!is.Numeric(method.init, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
method.init > 4)
- stop("'imethod' must be 1 or 2 or 3 or 4")
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lskewp <- as.list(substitute(lskewpar))
+ eskewp <- link2list(lskewp)
+ lskewp <- attr(eskewp, "function.name")
+
- # if (!is.Numeric(pp, allowable.length = 1, positive = TRUE))
- # stop("bad input for argument 'pp'")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lskewpar) != "character" && mode(lskewpar) != "name")
- lscale = as.character(substitute(lscale))
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ stop("bad input for argument 'zero'")
+
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eskewpar)) eskewpar = list()
new("vglmff",
blurb = c("Two-piece normal distribution \n\n",
"Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("skewpar", lscale, earg = eskewpar), "\n\n",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("skewpar", lscale, earg = eskewp), "\n\n",
"Mean: "),
constraints = eval(substitute(expression({
constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("skewpar", .lskewpar, earg = .eskewpar, tag = FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
- location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
- if ( .method.init == 3) {
- rep(weighted.mean(y, w), len = n)
- } else if ( .method.init == 2) {
- rep(median(rep(y, w)), len = n)
- } else if ( .method.init == 1) {
- junk$fitted
- } else {
- y
- }
- }
- skew.l.in <- sum((y < location.init)) / length(y)
- etastart <- cbind(
- theta2eta(location.init, .llocat, earg = .elocat),
- theta2eta(scale.y.est, .lscale, earg = .escale),
- theta2eta(skew.l.in, .lskewpar, earg = .escale))
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("skewpar", .lskewp, earg = .eskewp, tag = FALSE))
+
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y = y, w = w)
+ scale.y.est <- sqrt(sum(c(w) * junk$resid^2) / junk$df.residual)
+ location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
+ if ( .method.init == 3) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .method.init == 2) {
+ rep(median(rep(y, w)), len = n)
+ } else if ( .method.init == 1) {
+ junk$fitted
+ } else {
+ y
+ }
}
- }), list( .llocat = llocation, .lscale = lscale, .lskewpar = lskewpar,
- .elocat = elocation, .escale = escale, .eskewpar = eskewpar,
-
- .method.init=method.init ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocat, earg = .elocat)
- }, list( .llocat = llocation,
- .elocat = elocation, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link <- c("location" = .llocat, "scale" = .lscale,
- "skewpar" = .lskewpar)
- misc$earg <- list( "location" = .elocat, "scale" = .escale,
- "skewpar" = .eskewpar)
- misc$expected <- TRUE
- misc$method.init <- .method.init
- }), list( .llocat = llocation, .lscale = lscale, .lskewpar = lskewpar,
- .elocat = elocation, .escale = escale, .eskewpar = lskewpar,
- .method.init = method.init ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- myskew <- eta2theta(eta[,3], .lskewpar, earg = .eskewpar)
-
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dtpn(y, location = location, scale = myscale,
- skewpar = myskew, log.arg = TRUE))
- }
- }, list( .llocat = llocation, .lscale = lscale, .lskewpar = lskewpar,
- .elocat = elocation, .escale = escale, .eskewpar = eskewpar
- ))),
- vfamily = c("tpnff3"),
- deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
- myskew <- eta2theta(eta[,3], .lskewpar, earg = .eskewpar)
-
+ skew.l.in <- sum((y < location.init)) / length(y)
+ etastart <- cbind(
+ theta2eta(location.init, .llocat, earg = .elocat),
+ theta2eta(scale.y.est, .lscale, earg = .escale),
+ theta2eta(skew.l.in, .lskewp, earg = .escale))
+ }
+ }), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+ .elocat = elocat, .escale = escale, .eskewp = eskewp,
+
+ .method.init=method.init ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat, earg = .elocat)
+ }, list( .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat,
+ "scale" = .lscale,
+ "skewpar" = .lskewp)
+ misc$earg <- list( "location" = .elocat,
+ "scale" = .escale,
+ "skewpar" = .eskewp)
+ misc$expected <- TRUE
+ misc$method.init <- .method.init
+ }), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+ .elocat = elocat, .escale = escale, .eskewp = eskewp,
+ .method.init = method.init ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ myskew <- eta2theta(eta[, 3], .lskewp, earg = .eskewp)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dtpn(y, location = locat, scale = myscale,
+ skewpar = myskew, log.arg = TRUE))
+ }
+ }, list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+ .elocat = elocat, .escale = escale, .eskewp = eskewp
+ ))),
+ vfamily = c("tpnff3"),
+ deriv = eval(substitute(expression({
+ mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
+ myskew <- eta2theta(eta[, 3], .lskewp, earg = .eskewp)
+
- zedd <- (y - mylocat) / myscale
- # cond1 <- (zedd <= 0)
- cond2 <- (zedd > 0)
+ zedd <- (y - mylocat) / myscale
+ cond2 <- (zedd > 0)
- dl.dlocat <- zedd / (4 * myskew^2) # cond1
- dl.dlocat[cond2] <- (zedd / (4 * (1 - myskew)^2))[cond2]
- dl.dlocat <- dl.dlocat / myscale
+ dl.dlocat <- zedd / (4 * myskew^2) # cond1
+ dl.dlocat[cond2] <- (zedd / (4 * (1 - myskew)^2))[cond2]
+ dl.dlocat <- dl.dlocat / myscale
- dl.dscale <- zedd^2 / (4 * myskew^2)
- dl.dscale[cond2] <- (zedd^2 / (4 * (1 - myskew)^2))[cond2]
- dl.dscale <- (-1 + dl.dscale) / myscale
+ dl.dscale <- zedd^2 / (4 * myskew^2)
+ dl.dscale[cond2] <- (zedd^2 / (4 * (1 - myskew)^2))[cond2]
+ dl.dscale <- (-1 + dl.dscale) / myscale
- dl.dskewpar <- zedd^2 / (4 * myskew^3)
- dl.dskewpar[cond2] <- (-zedd^2 / (4 * (1 - myskew)^3))[cond2]
-
+ dl.dskewpar <- zedd^2 / (4 * myskew^3)
+ dl.dskewpar[cond2] <- (-zedd^2 / (4 * (1 - myskew)^3))[cond2]
+
- dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
- dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
- dskewpar.deta <- dtheta.deta(myskew, .lskewpar, earg = .eskewpar)
- ans <-
- w * cbind(dl.dlocat * dlocat.deta,
- dl.dscale * dscale.deta,
- dl.dskewpar * dskewpar.deta
- )
- ans
- }), list( .llocat = llocation, .lscale = lscale, .lskewpar = lskewpar,
- .elocat = elocation, .escale = escale, .eskewpar = eskewpar
- ))),
- weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, dimm(M)) # diag matrix; y is one-col too
-
- temp10 <- myskew * (1 - myskew)
- ed2l.dlocat2 <- 1 / ((4 * temp10) * myscale^2)
- ed2l.dscale2 <- 2 / myscale^2
- ed2l.dskewpar2 <- 3 / temp10
- ed2l.dlocatdskewpar <- (-2 * sqrt(2)) / (temp10 * sqrt(pi) * myscale)
-
- print("hello")
- wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
- wz[, iam(3,3,M)] <- ed2l.dskewpar2 * dskewpar.deta^2
- wz[, iam(1,3,M)] <- ed2l.dlocatdskewpar * dskewpar.deta * dlocat.deta
+ dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+ dscale.deta <- dtheta.deta(myscale, .lscale, earg = .escale)
+ dskewpar.deta <- dtheta.deta(myskew, .lskewp, earg = .eskewp)
+ ans <-
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta,
+ dl.dskewpar * dskewpar.deta
+ )
+ ans
+ }), list( .llocat = llocat, .lscale = lscale, .lskewp = lskewp,
+ .elocat = elocat, .escale = escale, .eskewp = eskewp
+ ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, dimm(M)) # diag matrix; y is one-col too
- ans
- w * wz
- })
-
- )))
+ temp10 <- myskew * (1 - myskew)
+
+ ned2l.dlocat2 <- 1 / ((4 * temp10) * myscale^2)
+ ned2l.dscale2 <- 2 / myscale^2
+ ned2l.dskewpar2 <- 3 / temp10
+ ned2l.dlocatdskewpar <- (-2 * sqrt(2)) / (temp10 * sqrt(pi) *
+ myscale)
+
+ wz[, iam(1, 1,M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2, 2,M)] <- ned2l.dscale2 * dscale.deta^2
+ wz[, iam(3, 3,M)] <- ned2l.dskewpar2 * dskewpar.deta^2
+ wz[, iam(1, 3,M)] <- ned2l.dlocatdskewpar * dskewpar.deta *
+ dlocat.deta
+
+ ans
+ c(w) * wz
+ }))))
}
-# ----------------------------------------------------------------------
-# (c) Not yet assigned
-# ----------------------------------------------------------------------
-# ----------------------------------------------------------------------
diff --git a/R/family.positive.R b/R/family.positive.R
index d9697e0..3659eb5 100644
--- a/R/family.positive.R
+++ b/R/family.positive.R
@@ -8,13 +8,19 @@
-rhuggins91 =
+
+
+rhuggins91 <-
function(n, nTimePts = 5, pvars = length(xcoeff),
xcoeff = c(-2, 1, 2),
capeffect = -1,
double.ch = FALSE,
- link = "logit", earg = list()
- ) {
+ link = "logit",
+ earg.link = FALSE) {
+
+
+
+
use.n <- if ((length.n <- length(n)) > 1) length.n else
@@ -29,9 +35,16 @@ rhuggins91 =
if (pvars > length(xcoeff))
stop("argument 'pvars' is too high")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ if (earg.link) {
+ earg <- link
+ } else {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ }
+ link <- attr(earg, "function.name")
+
+
Ymatrix = matrix(0, use.n, nTimePts, dimnames =
@@ -95,7 +108,7 @@ rhuggins91 =
nTimePts = nTimePts, pvars = pvars,
xcoeff = xcoeff,
capeffect = capeffect,
- link = link, earg = earg))
+ link = earg, earg.link = TRUE))
}
rownames(ans) = as.character(1:orig.n)
@@ -119,7 +132,9 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
x = as.matrix(x)
prob = as.matrix(prob)
prob0 = as.matrix(prob0)
- log.arg = log
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
@@ -137,7 +152,7 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
- huggins91 = function(link = "logit", earg = list(),
+ huggins91 = function(link = "logit",
parallel = TRUE,
iprob = NULL,
eim.not.oim = TRUE) {
@@ -146,9 +161,11 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
if (length(iprob))
if (!is.Numeric(iprob, positive = TRUE) ||
@@ -179,22 +196,36 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
}, list( .parallel = parallel ))),
initialize = eval(substitute(expression({
- Musual = 2
- mustart.orig = mustart
- y = as.matrix(y)
- Mdiv2 = ncoly = ncol(y)
- M = Musual * ncoly
+ Musual <- 2
+ mustart.orig <- mustart
+ y <- as.matrix(y)
+ Mdiv2 <- ncoly <- ncol(y)
+ M <- Musual * ncoly
- w = matrix(w, n, ncoly)
- mustart = matrix(colSums(y) / colSums(w),
- n, ncol(y), byrow = TRUE)
- mustart[mustart == 0] = 0.05
- mustart[mustart == 1] = 0.95
+ w <- matrix(w, n, ncoly)
+ mustart <- matrix(colSums(y) / colSums(w),
+ n, ncol(y), byrow = TRUE)
+ mustart[mustart == 0] <- 0.05
+ mustart[mustart == 1] <- 0.95
if (ncoly == 1)
stop("the response is univariate, therefore use posbinomial()")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
if (!all(y == 0 | y == 1))
stop("response must contain 0s and 1s only")
if (!all(w == 1))
@@ -208,7 +239,7 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
}
dn2 = c(dn2, paste(dn2, ".0", sep = ""))
dn2 = dn2[interleave.VGAM(M, M = Musual)]
- predictors.names = namesof(dn2, .link, earg = .earg, short = TRUE)
+ predictors.names <- namesof(dn2, .link , earg = .earg, short = TRUE)
if (!length(etastart)) {
@@ -220,22 +251,22 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
} else {
mustart
}
- etastart = cbind(theta2eta(mustart.use, .link, earg = .earg ))
+ etastart = cbind(theta2eta(mustart.use, .link , earg = .earg ))
etastart = kronecker(etastart, cbind(1, 1))
}
mustart = NULL
}), list( .link = link, .earg = earg, .iprob = iprob ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual = 2
- Mdiv2 = ncol(eta) / Musual
- index1 = Musual * (1:Mdiv2) - 1
- index2 = Musual * (1:Mdiv2) - 0
+ Musual <- 2
+ Mdiv2 <- ncol(eta) / Musual
+ index1 <- Musual * (1:Mdiv2) - 1
+ index2 <- Musual * (1:Mdiv2) - 0
probs.numer = eta2theta(eta[, index1], # + extra$moffset[, index1],
- .link, earg = .earg )
+ .link , earg = .earg )
- probs.denom = eta2theta(eta[, index1], .link, earg = .earg )
+ probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
logAA0 = rowSums(log1p(-probs.denom))
@@ -246,7 +277,7 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = rep( .link, length = M)
+ misc$link = rep( .link , length = M)
names(misc$link) = dn2
misc$earg = vector("list", M)
@@ -267,15 +298,15 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
ycounts = y
- Musual = 2
- Mdiv2 = ncol(eta) / Musual
- index1 = Musual * (1:Mdiv2) - 1
- index2 = Musual * (1:Mdiv2) - 0
+ Musual <- 2
+ Mdiv2 <- ncol(eta) / Musual
+ index1 <- Musual * (1:Mdiv2) - 1
+ index2 <- Musual * (1:Mdiv2) - 0
probs.numer = eta2theta(eta[, index1], # + extra$moffset[, index1],
- .link, earg = .earg )
+ .link , earg = .earg )
- probs.denom = eta2theta(eta[, index1], .link, earg = .earg )
+ probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
@@ -288,14 +319,14 @@ dhuggins91 = function(x, prob, prob0 = prob, log = FALSE) {
}, list( .link = link, .earg = earg ))),
vfamily = c("huggins91"),
deriv = eval(substitute(expression({
- Musual = 2
- Mdiv2 = ncol(eta) / Musual
- index1 = Musual * (1:Mdiv2) - 1
- index2 = Musual * (1:Mdiv2) - 0
- probs.numer = eta2theta(eta[, index1], .link, earg = .earg )
+ Musual <- 2
+ Mdiv2 <- ncol(eta) / Musual
+ index1 <- Musual * (1:Mdiv2) - 1
+ index2 <- Musual * (1:Mdiv2) - 0
+ probs.numer = eta2theta(eta[, index1], .link , earg = .earg )
- probs.denom = eta2theta(eta[, index1], .link, earg = .earg )
+ probs.denom = eta2theta(eta[, index1], .link , earg = .earg )
logAA0 = rowSums(log1p(-probs.denom))
@@ -396,11 +427,13 @@ dposnegbin = function(x, size, prob = NULL, munb = NULL, log = FALSE) {
stop("'prob' and 'munb' both specified")
prob <- size / (size + munb)
}
- if (!is.logical(log.arg <- log))
- stop("bad input for 'log'")
+
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(prob), length(size))
+
+ LLL <- max(length(x), length(prob), length(size))
x = rep(x, len = LLL);
prob = rep(prob, len = LLL);
size = rep(size, len = LLL);
@@ -429,7 +462,7 @@ pposnegbin = function(q, size, prob = NULL, munb = NULL) {
stop("'prob' and 'munb' both specified")
prob <- size / (size + munb)
}
- L = max(length(q), length(prob), length(size))
+ L <- max(length(q), length(prob), length(size))
if (length(q) != L)
q = rep(q, length.out = L);
if (length(prob) != L)
@@ -479,7 +512,6 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
posnegbinomial = function(lmunb = "loge", lsize = "loge",
- emunb = list(), esize = list(),
isize = NULL, zero = -2,
nsimEIM = 250,
shrinkage.init = 0.95, imethod = 1)
@@ -496,13 +528,14 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
+ lmunb <- as.list(substitute(lmunb))
+ emunb <- link2list(lmunb)
+ lmunb <- attr(emunb, "function.name")
+
+ lsize <- as.list(substitute(lsize))
+ esize <- link2list(lsize)
+ lsize <- attr(esize, "function.name")
if (!is.Numeric(nsimEIM, allowable.length = 1,
@@ -520,33 +553,66 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
"Mean: munb / (1 - (size / (size + munb))^size)"),
constraints = eval(substitute(expression({
- dotzero = .zero
- Musual = 2
+ dotzero <- .zero
+ Musual <- 2
eval(negzero.expression)
}), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ lmunb = .lmunb ,
+ emunb = .emunb ,
+ lsize = .lsize ,
+ esize = .esize )
+ }, list( .lmunb = lmunb, .lsize = lsize, .isize = isize,
+ .emunb = emunb, .esize = esize,
+ .sinit = shrinkage.init,
+ .imethod = imethod ))),
+
initialize = eval(substitute(expression({
- Musual = 2
+ Musual <- 2
if (any(y == 0))
stop("there are zero values in the response")
y = as.matrix(y)
- M = 2 * ncol(y)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+ print("head(w)")
+ print( head(w) )
+ print("head(y)")
+ print( head(y) )
+
+
+
+
+
+ M = Musual * ncol(y)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
- predictors.names = c(
+ predictors.names <- c(
namesof(if (NOS == 1) "munb" else
paste("munb", 1:NOS, sep = ""),
.lmunb, earg = .emunb, tag = FALSE),
namesof(if (NOS == 1) "size" else
paste("size", 1:NOS, sep = ""),
.lsize, earg = .esize, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = Musual)]
+ predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
if (!length(etastart)) {
mu.init = y
for(iii in 1:ncol(y)) {
use.this = if ( .imethod == 1) {
- weighted.mean(y[, iii], w)
+ weighted.mean(y[, iii], w[, iii])
} else {
median(y[,iii])
}
@@ -567,7 +633,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
for(spp. in 1:NOS) {
kmat0[, spp.] = getMaxMin(k.grid,
objfun = posnegbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w,
+ y = y[, spp.], x = x, w = w[, spp.],
extraargs = mu.init[, spp.])
}
}
@@ -583,7 +649,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
.sinit = shrinkage.init,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual = 2
+ Musual <- 2
NOS = ncol(eta) / Musual
munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb, earg = .emunb )
@@ -616,8 +682,8 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
.nsimEIM = nsimEIM, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual = 2
- NOS = ncol(eta) / Musual
+ Musual <- 2
+ NOS <- ncol(eta) / Musual
munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb, earg = .emunb )
kmat = eta2theta(eta[, Musual*(1:NOS) , drop = FALSE],
@@ -631,8 +697,8 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
vfamily = c("posnegbinomial"),
deriv = eval(substitute(expression({
- Musual = 2
- NOS = extra$NOS
+ Musual <- 2
+ NOS <- extra$NOS
munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
.lmunb , earg = .emunb )
@@ -669,7 +735,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
.emunb = emunb, .esize = esize ))),
weight = eval(substitute(expression({
run.varcov =
- wz = matrix(0.0, n, 4*NOS-1)
+ wz = matrix(0.0, n, 2 * Musual * NOS - 1)
@@ -723,8 +789,7 @@ posnegbinomial.control <- function(save.weight = TRUE, ...)
}
-
- c(w) * wz
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .nsimEIM = nsimEIM ))))
}
@@ -740,7 +805,7 @@ dposgeom = function(x, prob, log = FALSE) {
pposgeom = function(q, prob) {
if (!is.Numeric(prob, positive = TRUE))
stop("bad input for argument 'prob'")
- L = max(length(q), length(prob))
+ L <- max(length(q), length(prob))
if (length(q) != L) q = rep(q, length.out = L);
if (length(prob) != L) prob = rep(prob, length.out = L);
ifelse(q < 1, 0,
@@ -780,12 +845,14 @@ rposgeom = function(n, prob) {
dpospois = function(x, lambda, log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for 'log'")
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
+
if (!is.Numeric(lambda, positive = TRUE))
stop("bad input for argument 'lambda'")
- L = max(length(x), length(lambda))
+ L <- max(length(x), length(lambda))
x = rep(x, len = L); lambda = rep(lambda, len = L);
ans = if (log.arg) {
@@ -801,7 +868,7 @@ dpospois = function(x, lambda, log = FALSE) {
ppospois = function(q, lambda) {
if (!is.Numeric(lambda, positive = TRUE))
stop("bad input for argument 'lambda'")
- L = max(length(q), length(lambda))
+ L <- max(length(q), length(lambda))
if (length(q) != L) q = rep(q, length.out = L);
if (length(lambda) != L) lambda = rep(lambda, length.out = L);
@@ -851,13 +918,14 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
- pospoisson = function(link = "loge", earg = list(), expected = TRUE,
- ilambda = NULL, imethod = 1)
+ pospoisson = function(link = "loge", expected = TRUE,
+ ilambda = NULL, imethod = 1, zero = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg <- list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
@@ -869,34 +937,61 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
imethod > 3)
stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
new("vglmff",
blurb = c("Positive-Poisson distribution\n\n",
"Links: ",
namesof("lambda", link, earg = earg, tag = FALSE)),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
infos = eval(substitute(function(...) {
list(Musual = 1,
- link = .link,
+ link = .link ,
earg = .earg)
}, list( .link = link, .earg = earg ))),
initialize = eval(substitute(expression({
- y <- as.matrix(y)
- if (any(y < 1))
- stop("all y values must be in 1,2,3,...")
- if (any(y != round(y )))
- stop("the response must be integer-valued")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+
+ mynames1 <- paste("lambda",
+ if (ncoly > 1) 1:ncoly else "", sep = "")
predictors.names <-
- namesof(paste("lambda", if (ncol(y) > 1) 1:ncol(y) else "", sep = ""),
- .link, earg = .earg, tag = FALSE)
+ namesof(mynames1, .link , earg = .earg, tag = FALSE)
if ( .imethod == 1) {
lambda.init <- apply(y, 2, median) + 1/8
- lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
+ lambda.init <- matrix(lambda.init, n, ncoly, byrow = TRUE)
} else if ( .imethod == 2) {
lambda.init <- apply(y, 2, weighted.mean, w = w) + 1/8
- lambda.init <- matrix(lambda.init, n, ncol(y), byrow = TRUE)
+ lambda.init <- matrix(lambda.init, n, ncoly, byrow = TRUE)
} else {
lambda.init <- -y / expm1(-y)
}
@@ -904,28 +999,29 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
lambda.init <- lambda.init * 0 + .ilambda
if (!length(etastart))
- etastart <- theta2eta(lambda.init, .link, earg = .earg)
+ etastart <- theta2eta(lambda.init, .link , earg = .earg)
}), list( .link = link, .earg = earg,
.ilambda = ilambda, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda <- eta2theta(eta, .link, earg = .earg )
+ lambda <- eta2theta(eta, .link , earg = .earg )
-lambda / expm1(-lambda)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$expected <- .expected
-
- misc$link <- rep( .link, len = M)
- names(misc$link) <- if (M == 1) "lambda" else
- paste("lambda", 1:M, sep = "")
+ misc$link <- rep( .link , len = M)
+ names(misc$link) <- mynames1
misc$earg <- vector("list", M)
- names(misc$earg) <- names(misc$link)
+ names(misc$earg) <- mynames1
for(ii in 1:M)
misc$earg[[ii]] <- .earg
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg, .expected = expected ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda <- eta2theta(eta, .link, earg = .earg )
+ lambda <- eta2theta(eta, .link , earg = .earg )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
@@ -934,19 +1030,22 @@ rposnegbin = function(n, size, prob = NULL, munb = NULL) {
}, list( .link = link, .earg = earg ))),
vfamily = c("pospoisson"),
deriv = eval(substitute(expression({
- lambda <- eta2theta(eta, .link, earg = .earg )
+ lambda <- eta2theta(eta, .link , earg = .earg )
+
temp6 <- expm1(lambda)
dl.dlambda <- y / lambda - 1 - 1 / temp6
- dlambda.deta <- dtheta.deta(lambda, .link, earg = .earg )
- w * dl.dlambda * dlambda.deta
+
+ dlambda.deta <- dtheta.deta(lambda, .link , earg = .earg )
+
+ c(w) * dl.dlambda * dlambda.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
if ( .expected ) {
- ed2l.dlambda2 <- (temp6 + 1) * (1/lambda - 1/temp6) / temp6
- wz <- (dlambda.deta^2) * ed2l.dlambda2
+ ned2l.dlambda2 <- (temp6 + 1) * (1/lambda - 1/temp6) / temp6
+ wz <- ned2l.dlambda2 * dlambda.deta^2
} else {
d2l.dlambda2 <- y / lambda^2 - (temp6 + 1) / temp6^2
- d2lambda.deta2 <- d2theta.deta2(lambda, .link, earg = .earg)
+ d2lambda.deta2 <- d2theta.deta2(lambda, .link , earg = .earg)
wz <- (dlambda.deta^2) * d2l.dlambda2 - dl.dlambda * d2lambda.deta2
}
c(w) * wz
@@ -966,7 +1065,7 @@ pposbinom = function(q, size, prob
if (!is.Numeric(prob, positive = TRUE))
stop("no zero or non-numeric values allowed for argument 'prob'")
- L = max(length(q), length(size), length(prob))
+ L <- max(length(q), length(size), length(prob))
if (length(q) != L) q = rep(q, length.out = L);
if (length(size) != L) size = rep(size, length.out = L);
if (length(prob) != L) prob = rep(prob, length.out = L);
@@ -984,9 +1083,9 @@ qposbinom = function(p, size, prob
- ans = qbinom(pbinom(0, size, prob, lower.tail = FALSE) * p +
- dbinom(0, size, prob),
- size = size, prob = prob)
+ ans <- qbinom(pbinom(0, size, prob, lower.tail = FALSE) * p +
+ dbinom(0, size, prob),
+ size = size, prob = prob)
ans[p > 1] = NaN
ans[p < 0] = NaN
@@ -1003,9 +1102,12 @@ rposbinom = function(n, size, prob) {
dposbinom = function(x, size, prob, log = FALSE) {
- log.arg = log
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(size), length(prob))
+
+
+ L <- max(length(x), length(size), length(prob))
x = rep(x, len = L);
size = rep(size, len = L);
prob = rep(prob, len = L);
@@ -1033,13 +1135,23 @@ dposbinom = function(x, size, prob, log = FALSE) {
- posbinomial = function(link = "logit", earg = list(),
- mv = FALSE, parallel = FALSE, zero = NULL) {
+ posbinomial <-
+ function(link = "logit",
+ mv = FALSE, parallel = FALSE, zero = NULL) {
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ if (!is.logical(mv) || length(mv) != 1)
+ stop("bad input for argument 'mv'")
+
+ if (mv && length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
new("vglmff",
@@ -1053,7 +1165,10 @@ dposbinom = function(x, size, prob, log = FALSE) {
"\n"),
constraints = eval(substitute(expression({
constraints <- cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 1,
@@ -1062,49 +1177,66 @@ dposbinom = function(x, size, prob, log = FALSE) {
initialize = eval(substitute(expression({
- mustart.orig = mustart
+ mustart.orig <- mustart
if ( .mv ) {
- y = as.matrix(y)
- M = ncoly = ncol(y)
- extra$orig.w = w
- w = as.matrix(w) # Added 20110308
- mustart = matrix(colSums(y) / colSums(w),
- n, ncol(y), byrow = TRUE)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+ extra$orig.w <- w
+ mustart <- matrix(colSums(y) / colSums(w), # Not colSums(y * w)...
+ n, ncoly, byrow = TRUE)
} else {
- eval(binomialff(link = .link, earg = .earg )@initialize)
+ eval(binomialff(link = .earg , # earg = .earg ,
+ earg.link = TRUE)@initialize)
}
if ( .mv ) {
- dn2 = if (is.matrix(y)) dimnames(y)[[2]] else NULL
- dn2 = if (length(dn2)) {
+ dn2 <- if (is.matrix(y)) dimnames(y)[[2]] else NULL
+ dn2 <- if (length(dn2)) {
paste("E[", dn2, "]", sep = "")
} else {
paste("prob", 1:M, sep = "")
}
- predictors.names = namesof(if (M > 1) dn2 else
- "prob", .link, earg = .earg, short = TRUE)
+ predictors.names <- namesof(if (M > 1) dn2 else
+ "prob", .link , earg = .earg, short = TRUE)
- w = matrix(w, n, ncoly)
- y = y / w # Now sample proportion
+ w <- matrix(w, n, ncoly)
+ y <- y / w # Now sample proportion
} else {
- predictors.names =
- namesof("prob", .link, earg = .earg , tag = FALSE)
+ predictors.names <-
+ namesof("prob", .link , earg = .earg , tag = FALSE)
}
- if (length(extra)) extra$w = w else extra = list(w = w)
+ if (length(extra)) extra$w <- w else extra <- list(w = w)
if (!length(etastart)) {
- mustart.use = if (length(mustart.orig)) mustart.orig else mustart
- etastart = cbind(theta2eta(mustart.use, .link, earg = .earg ))
+ mustart.use <- if (length(mustart.orig)) mustart.orig else mustart
+ etastart <- cbind(theta2eta(mustart.use, .link , earg = .earg ))
}
- mustart = NULL
- }), list( .link = link, .earg = earg, .mv = mv ))),
+ mustart <- NULL
+ }), list( .link = link,
+ .earg = earg, .mv = mv ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
w = extra$w
- mymu = eta2theta(eta, .link, earg = .earg )
+ mymu = eta2theta(eta, .link , earg = .earg )
nvec = if ( .mv ) {
w
} else {
@@ -1115,10 +1247,10 @@ dposbinom = function(x, size, prob, log = FALSE) {
},
list( .link = link, .earg = earg, .mv = mv ))),
last = eval(substitute(expression({
- extra$w = NULL # Kill it off
+ extra$w = NULL # Kill it off
- misc$link = rep( .link, length = M)
+ misc$link = rep( .link , length = M)
names(misc$link) = if (M > 1) dn2 else "prob"
misc$earg = vector("list", M)
@@ -1146,7 +1278,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
round(w)
}
use.orig.w = if (is.numeric(extra$orig.w)) extra$orig.w else 1
- mymu = eta2theta(eta, .link, earg = .earg )
+ mymu = eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
@@ -1165,8 +1297,8 @@ dposbinom = function(x, size, prob, log = FALSE) {
if (is.numeric(extra$orig.w)) round(w / extra$orig.w) else
round(w)
}
- mymu = eta2theta(eta, .link, earg = .earg )
- dmu.deta = dtheta.deta(mymu, .link, earg = .earg )
+ mymu = eta2theta(eta, .link , earg = .earg )
+ dmu.deta = dtheta.deta(mymu, .link , earg = .earg )
temp1 = 1 - (1 - mymu)^nvec
temp2 = (1 - mymu)^2
@@ -1178,11 +1310,11 @@ dposbinom = function(x, size, prob, log = FALSE) {
c(w) * dl.dmu * dmu.deta
}), list( .link = link, .earg = earg, .mv = mv ))),
weight = eval(substitute(expression({
- ed2l.dmu2 = 1 / (mymu * temp1) + 1 / temp2 -
- mymu / (temp1 * temp2) -
- (nvec-1) * temp3 / temp1 -
- nvec * (temp2^(nvec-1)) / temp1^2
- wz = c(w) * ed2l.dmu2 * dmu.deta^2
+ ned2l.dmu2 = 1 / (mymu * temp1) + 1 / temp2 -
+ mymu / (temp1 * temp2) -
+ (nvec-1) * temp3 / temp1 -
+ nvec * (temp2^(nvec-1)) / temp1^2
+ wz = c(w) * ned2l.dmu2 * dmu.deta^2
wz
}), list( .link = link, .earg = earg, .mv = mv ))))
}
@@ -1193,11 +1325,13 @@ dposbinom = function(x, size, prob, log = FALSE) {
- rasch = function(lability = "identity", eability = list(),
- ldifficulty = "identity", edifficulty = list(),
- iability = NULL,
- idifficulty = NULL,
- parallel = TRUE) {
+if (FALSE) rasch <-
+ function(lability = "identity", eability = list(),
+ ldifficulty = "identity", edifficulty = list(),
+ iability = NULL,
+ idifficulty = NULL,
+ parallel = TRUE) {
+
@@ -1258,7 +1392,7 @@ dposbinom = function(x, size, prob, log = FALSE) {
paste("zz", 1:Mdiv2, sep = "")
}
dn2 = c(dn2, paste("item", as.character(1:nrow(y)), sep = ""))
- predictors.names =
+ predictors.names <-
namesof(dn2, .labil, earg = .eability, short = TRUE)
@@ -1316,8 +1450,6 @@ dposbinom = function(x, size, prob, log = FALSE) {
.ldiff = ldiff, .ediff = ediff ))),
vfamily = c("rasch"),
deriv = eval(substitute(expression({
- print("head(mu)")
- print( head(mu) )
dabil.deta = 1
ddiff.deta = 1
@@ -1326,8 +1458,6 @@ dposbinom = function(x, size, prob, log = FALSE) {
deriv.ans = cbind(dl.dabil * dabil.deta,
dl.ddiff * ddiff.deta)
- print("head(deriv.ans)")
- print( head(deriv.ans) )
deriv.ans
}), list( .labil = labil, .eabil = eabil,
@@ -1352,8 +1482,6 @@ dposbinom = function(x, size, prob, log = FALSE) {
for (jay in 1:ncoly)
wz[ii, iam(ii, jay, M = M)] = -mu[ii, jay] * (1 - mu[ii, jay])
- print("head(wz)")
- print( head(wz) )
wz = wz * w
wz
diff --git a/R/family.qreg.R b/R/family.qreg.R
index f418a55..1611b4e 100644
--- a/R/family.qreg.R
+++ b/R/family.qreg.R
@@ -17,213 +17,227 @@
+
+
lms.bcn.control <-
lms.bcg.control <-
lms.yjn.control <- function(trace = TRUE, ...)
- list(trace=trace)
+ list(trace = trace)
lms.bcn <- function(percentiles = c(25, 50, 75),
- zero = c(1, 3),
- llambda = "identity",
- lmu = "identity",
- lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
- dfmu.init=4,
- dfsigma.init = 2,
- ilambda = 1,
- isigma = NULL, expectiles = FALSE)
+ zero = c(1, 3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ dfmu.init=4,
+ dfsigma.init = 2,
+ ilambda = 1,
+ isigma = NULL, expectiles = FALSE)
{
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- if (!is.list(elambda)) elambda = list()
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
- if (!is.Numeric(ilambda))
- stop("bad input for argument 'ilambda'")
- if (length(isigma) &&
- !is.Numeric(isigma, positive = TRUE))
- stop("bad input for argument 'isigma'")
- if (length(expectiles) != 1 || !is.logical(expectiles))
- stop("bad input for argument 'expectiles'")
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
- new("vglmff",
- blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
- " Regression (Box-Cox transformation to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda), ", ",
- namesof("mu", link = lmu, earg = emu), ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y<0, na.rm = TRUE))
- stop("negative responses not allowed")
-
- predictors.names =
- c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
- namesof("mu", .lmu, earg = .emu, short= TRUE),
- namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
-
- if (!length(etastart)) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = y, w = w, df = .dfmu.init)
- fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ if (!is.Numeric(ilambda))
+ stop("bad input for argument 'ilambda'")
+ if (length(isigma) &&
+ !is.Numeric(isigma, positive = TRUE))
+ stop("bad input for argument 'isigma'")
+ if (length(expectiles) != 1 || !is.logical(expectiles))
+ stop("bad input for argument 'expectiles'")
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
- sigma.init = if (is.null(.isigma)) {
- myratio = ((y/fv.init)^lambda.init - 1) / lambda.init
- if (is.Numeric( .dfsigma.init )) {
- fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = myratio^2,
- w = w, df = .dfsigma.init)
- sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
- } else
- sqrt(var(myratio))
- } else .isigma
-
- etastart =
- cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
- theta2eta(fv.init, .lmu, earg = .emu),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
- }
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .dfmu.init = dfmu.init,
- .dfsigma.init = dfsigma.init,
- .ilambda = ilambda, .isigma = isigma ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- if ( .expectiles ) {
- explot.lms.bcn(percentiles= .percentiles, eta = eta)
- } else {
- qtplot.lms.bcn(percentiles= .percentiles, eta = eta)
- }
+ new("vglmff",
+ blurb = c("LMS ", if (expectiles) "Expectile" else "Quantile",
+ " Regression (Box-Cox transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = lmu, earg = emu), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero = zero))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ namesof("mu", .lmu, earg = .emu, short= TRUE),
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
+
+ if (!length(etastart)) {
+
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y, w = w, df = .dfmu.init)
+ fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+
+ lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
+ sigma.init = if (is.null(.isigma)) {
+ myratio = ((y/fv.init)^lambda.init - 1) / lambda.init
+ if (is.Numeric( .dfsigma.init )) {
+ fit600 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = myratio^2,
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit600, x = x[, min(ncol(x), 2)])$y)))
+ } else
+ sqrt(var(myratio))
+ } else .isigma
+
+ etastart =
+ cbind(theta2eta(lambda.init, .llambda, earg = .elambda),
+ theta2eta(fv.init, .lmu, earg = .emu),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
+ }
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda, .isigma = isigma ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
+ eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ if ( .expectiles ) {
+ explot.lms.bcn(percentiles= .percentiles, eta = eta)
+ } else {
+ qtplot.lms.bcn(percentiles= .percentiles, eta = eta)
+ }
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .percentiles = percentiles, .expectiles = expectiles ))),
+ last = eval(substitute(expression({
+ misc$links <- c(lambda = .llambda, mu = .lmu, sigma = .lsigma )
+
+ misc$earg <- list(lambda = .elambda, mu = .emu, sigma = .esigma )
+
+ misc$percentiles <- .percentiles
+ misc$true.mu <- FALSE # @fitted is not a true mu
+ misc$expectiles <- .expectiles
+ if (control$cdf) {
+ post$cdf = cdf.lms.bcn(y,
+ eta0 = matrix(c(lambda, mymu, sigma), ncol = 3,
+ dimnames = list(dimnames(x)[[1]], NULL)))
+ }
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .percentiles = percentiles, .expectiles = expectiles ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ muvec = eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ zedd = ((y/muvec)^lambda - 1) / (lambda * sigma)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented") else {
+ use.this = (lambda * log(y / muvec) - log(sigma) - log(y) +
+ dnorm(zedd, log = TRUE))
+ use.this[abs(lambda) < 0.001] =
+ (-log(y / muvec) - log(sigma) +
+ dnorm(zedd, log = TRUE))[abs(lambda) < 0.001]
+ sum(c(w) * use.this)
+ }
}, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .percentiles = percentiles, .expectiles = expectiles ))),
- last = eval(substitute(expression({
- misc$percentiles = .percentiles
- misc$links = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
- misc$true.mu = FALSE # $fitted is not a true mu
- misc$expectiles = .expectiles
- if (control$cdf) {
- post$cdf = cdf.lms.bcn(y, eta0=matrix(c(lambda,mymu,sigma),
- ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
- }
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .percentiles = percentiles, .expectiles = expectiles ))),
- loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- muvec = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- zedd = ((y/muvec)^lambda - 1) / (lambda * sigma)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented") else {
- use.this = (lambda * log(y / muvec) - log(sigma) - log(y) +
- dnorm(zedd, log = TRUE))
- use.this[abs(lambda) < 0.001] = (-log(y / muvec) - log(sigma) +
- dnorm(zedd, log = TRUE))[abs(lambda) < 0.001]
- sum(w * use.this)
- }
- }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
- vfamily = c("lms.bcn", "lmscreg"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- zedd = ((y/mymu)^lambda - 1) / (lambda * sigma)
- z2m1 = zedd * zedd - 1
- dl.dlambda = zedd*(zedd - log(y/mymu) / sigma) / lambda -
- z2m1 * log(y/mymu)
- dl.dmu = zedd / (mymu * sigma) + z2m1 * lambda / mymu
- dl.dsigma = z2m1 / sigma
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
- c(w) * cbind(dl.dlambda * dlambda.deta,
- dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta)
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, 6)
- wz[,iam(1,1,M)] = (7 * sigma^2 / 4) * dlambda.deta^2
- wz[,iam(2,2,M)] = (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 * dmu.deta^2
- wz[,iam(3,3,M)] = (2 / sigma^2) * dsigma.deta^2
- wz[,iam(1,2,M)] = (-1 / (2 * mymu)) * dlambda.deta * dmu.deta
- wz[,iam(1,3,M)] = (lambda * sigma) * dlambda.deta * dsigma.deta
- wz[,iam(2,3,M)] = (2*lambda/(mymu * sigma)) * dmu.deta * dsigma.deta
- c(w) * wz
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))))
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ vfamily = c("lms.bcn", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+
+ zedd = ((y/mymu)^lambda - 1) / (lambda * sigma)
+ z2m1 = zedd * zedd - 1
+ dl.dlambda = zedd*(zedd - log(y/mymu) / sigma) / lambda -
+ z2m1 * log(y/mymu)
+ dl.dmu = zedd / (mymu * sigma) + z2m1 * lambda / mymu
+ dl.dsigma = z2m1 / sigma
+ dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
+
+ dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
+
+ c(w) * cbind(dl.dlambda * dlambda.deta,
+ dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, 6)
+ wz[,iam(1, 1, M)] = (7 * sigma^2 / 4) * dlambda.deta^2
+ wz[,iam(2, 2, M)] = (1 + 2*(lambda*sigma)^2)/(mymu*sigma)^2 *
+ dmu.deta^2
+ wz[,iam(3, 3, M)] = (2 / sigma^2) * dsigma.deta^2
+ wz[,iam(1, 2, M)] = (-1 / (2 * mymu)) * dlambda.deta * dmu.deta
+ wz[,iam(1, 3, M)] = (lambda * sigma) * dlambda.deta * dsigma.deta
+ wz[,iam(2, 3, M)] = (2*lambda/(mymu * sigma)) *
+ dmu.deta * dsigma.deta
+ c(w) * wz
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))))
}
- lms.bcg = function(percentiles = c(25, 50, 75),
- zero = c(1,3),
- llambda = "identity",
- lmu = "identity",
- lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
- dfmu.init=4,
- dfsigma.init = 2,
- ilambda = 1,
- isigma = NULL)
+ lms.bcg <- function(percentiles = c(25, 50, 75),
+ zero = c(1, 3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ dfmu.init=4,
+ dfsigma.init = 2,
+ ilambda = 1,
+ isigma = NULL)
{
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
- if (!is.list(elambda)) elambda = list()
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
if (!is.Numeric(ilambda))
stop("bad input for argument 'ilambda'")
if (length(isigma) && !is.Numeric(isigma, positive = TRUE))
stop("bad input for argument 'isigma'")
- new("vglmff",
- blurb = c("LMS Quantile Regression ",
- "(Box-Cox transformation to a Gamma distribution)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda), ", ",
- namesof("mu", link = lmu, earg = emu), ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y<0, na.rm = TRUE))
- stop("negative responses not allowed")
-
- predictors.names = c(
+ new("vglmff",
+ blurb = c("LMS Quantile Regression ",
+ "(Box-Cox transformation to a Gamma distribution)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda), ", ",
+ namesof("mu", link = lmu, earg = emu), ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero = zero))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+ predictors.names <- c(
namesof("lambda", .llambda, earg = .elambda, short = TRUE),
namesof("mu", .lmu, earg = .emu, short = TRUE),
namesof("sigma", .lsigma, earg = .esigma, short = TRUE))
@@ -234,7 +248,7 @@ lms.yjn.control <- function(trace = TRUE, ...)
y = y, w = w, df = .dfmu.init)
fv.init = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
- lambda.init = if (is.Numeric( .ilambda)) .ilambda else 1.0
+ lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
sigma.init = if (is.null(.isigma)) {
myratio = ((y/fv.init)^lambda.init-1) / lambda.init
@@ -253,97 +267,102 @@ lms.yjn.control <- function(trace = TRUE, ...)
theta2eta(fv.init, .lmu, earg = .emu),
theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .dfmu.init = dfmu.init,
- .dfsigma.init = dfsigma.init,
- .ilambda = ilambda, .isigma = isigma ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- qtplot.lms.bcg(percentiles= .percentiles, eta = eta)
- }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .percentiles = percentiles ))),
- last = eval(substitute(expression({
- misc$percentiles = .percentiles
- misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
- misc$true.mu = FALSE # $fitted is not a true mu
- if (control$cdf) {
- post$cdf = cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
- ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
- }
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .percentiles = percentiles ))),
- loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- Gee = (y / mu)^lambda
- theta = 1 / (sigma * lambda)^2
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (log(abs(lambda)) + theta * (log(theta) +
- log(Gee)-Gee) - lgamma(theta) - log(y)))
- }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
- vfamily = c("lms.bcg", "lmscreg"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda, .isigma = isigma ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 2] = eta2theta(eta[, 2], .lmu, earg = .emu)
+ eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ qtplot.lms.bcg(percentiles= .percentiles, eta = eta)
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .percentiles = percentiles ))),
+ last = eval(substitute(expression({
+ misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- Gee = (y / mymu)^lambda
+ misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
+
+ misc$percentiles = .percentiles
+ misc$true.mu = FALSE # $fitted is not a true mu
+ if (control$cdf) {
+ post$cdf = cdf.lms.bcg(y, eta0=matrix(c(lambda,mymu,sigma),
+ ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
+ }
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .percentiles = percentiles ))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu = eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ Gee = (y / mu)^lambda
theta = 1 / (sigma * lambda)^2
- dd = digamma(theta)
-
- dl.dlambda = (1 + 2 * theta * (dd + Gee -1 -log(theta) -
- 0.5 * (Gee + 1) * log(Gee))) / lambda
- dl.dmu = lambda * theta * (Gee-1) / mymu
- dl.dsigma = 2*theta*(dd + Gee - log(theta * Gee)-1) / sigma
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
-
- cbind(dl.dlambda * dlambda.deta,
- dl.dmu * dmu.deta,
- dl.dsigma * dsigma.deta) * w
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))),
- weight = eval(substitute(expression({
- tritheta = trigamma(theta)
- wz = matrix(0, n, 6)
-
- if (TRUE) {
- part2 = dd + 2/theta - 2*log(theta)
- wz[,iam(1,1,M)] = ((1 + theta*(tritheta*(1+4*theta) -
- 4*(1+1/theta) - log(theta)*(2/theta -
- log(theta)) + dd*part2)) / lambda^2) *
- dlambda.deta^2
- } else {
- temp = mean( Gee*(log(Gee))^2 )
- wz[,iam(1,1,M)] = ((4 * theta * (theta * tritheta-1) - 1 +
- theta*temp) / lambda^2) * dlambda.deta^2
- }
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (log(abs(lambda)) + theta * (log(theta) +
+ log(Gee)-Gee) - lgamma(theta) - log(y)))
+ }, list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ vfamily = c("lms.bcg", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+
+ Gee = (y / mymu)^lambda
+ theta = 1 / (sigma * lambda)^2
+ dd = digamma(theta)
+
+ dl.dlambda = (1 + 2 * theta * (dd + Gee -1 -log(theta) -
+ 0.5 * (Gee + 1) * log(Gee))) / lambda
+ dl.dmu = lambda * theta * (Gee-1) / mymu
+ dl.dsigma = 2*theta*(dd + Gee - log(theta * Gee)-1) / sigma
+
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
+ cbind(dl.dlambda * dlambda.deta,
+ dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta) * w
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))),
+ weight = eval(substitute(expression({
+ tritheta = trigamma(theta)
+ wz = matrix(0, n, 6)
+
+ if (TRUE) {
+ part2 = dd + 2/theta - 2*log(theta)
+ wz[,iam(1, 1, M)] = ((1 + theta*(tritheta*(1+4*theta) -
+ 4*(1+1/theta) - log(theta)*(2/theta -
+ log(theta)) + dd*part2)) / lambda^2) *
+ dlambda.deta^2
+ } else {
+ temp = mean( Gee*(log(Gee))^2 )
+ wz[,iam(1, 1, M)] = ((4 * theta * (theta * tritheta-1) - 1 +
+ theta*temp) / lambda^2) * dlambda.deta^2
+ }
- wz[,iam(2,2,M)] = dmu.deta^2 / (mymu * sigma)^2
- wz[,iam(3,3,M)] = (4 * theta * (theta * tritheta - 1) / sigma^2) *
- dsigma.deta^2
- wz[,iam(1,2,M)] = (-theta * (dd + 1 / theta - log(theta)) / mymu) *
- dlambda.deta * dmu.deta
- wz[,iam(1,3,M)] = 2 * theta^1.5 * (2 * theta * tritheta - 2 -
- 1 / theta) * dlambda.deta * dsigma.deta
- c(w) * wz
- }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma ))))
+ wz[,iam(2, 2, M)] = dmu.deta^2 / (mymu * sigma)^2
+ wz[,iam(3, 3, M)] = (4 * theta * (theta * tritheta - 1) / sigma^2) *
+ dsigma.deta^2
+ wz[,iam(1, 2, M)] = (-theta * (dd + 1 / theta - log(theta)) / mymu) *
+ dlambda.deta * dmu.deta
+ wz[,iam(1, 3, M)] = 2 * theta^1.5 * (2 * theta * tritheta - 2 -
+ 1 / theta) * dlambda.deta * dsigma.deta
+ c(w) * wz
+ }), list( .llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma ))))
}
-dy.dpsi.yeojohnson = function(psi, lambda) {
+
+
+dy.dpsi.yeojohnson <- function(psi, lambda) {
L = max(length(psi), length(lambda))
psi = rep(psi, length.out = L);
@@ -354,7 +373,7 @@ dy.dpsi.yeojohnson = function(psi, lambda) {
}
-dyj.dy.yeojohnson = function(y, lambda) {
+dyj.dy.yeojohnson <- function(y, lambda) {
L = max(length(y), length(lambda))
y = rep(y, length.out = L);
lambda = rep(lambda, length.out = L);
@@ -363,7 +382,7 @@ dyj.dy.yeojohnson = function(y, lambda) {
}
- yeo.johnson = function(y, lambda, derivative = 0,
+ yeo.johnson <- function(y, lambda, derivative = 0,
epsilon = sqrt(.Machine$double.eps),
inverse = FALSE)
{
@@ -426,7 +445,7 @@ dyj.dy.yeojohnson = function(y, lambda) {
}
-dpsi.dlambda.yjn = function(psi, lambda, mymu, sigma,
+dpsi.dlambda.yjn <- function(psi, lambda, mymu, sigma,
derivative = 0, smallno=1.0e-8) {
if (!is.Numeric(derivative, allowable.length = 1,
@@ -458,11 +477,12 @@ dpsi.dlambda.yjn = function(psi, lambda, mymu, sigma,
pos = (CC & abs(lambda) <= smallno) | (!CC & abs(lambda-2) <= smallno)
if (any(pos))
- answer[pos,1+derivative] = (answer[pos, 1]^(1+derivative))/(derivative+1)
+ answer[pos,1+derivative] =
+ (answer[pos, 1]^(1+derivative))/(derivative+1)
answer
}
-gh.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gh.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
@@ -481,7 +501,7 @@ gh.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
}
-gh.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gh.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
(-derivmat[, 2]) / (sqrt(pi) * sigma^2)
} else {
@@ -492,7 +512,7 @@ gh.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
}
-gh.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gh.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
sqrt(8 / pi) * (-derivmat[, 2]) * z / sigma^2
} else {
@@ -505,7 +525,7 @@ gh.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat = NULL) {
}
-glag.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+glag.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
@@ -517,11 +537,12 @@ glag.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
(1 / sqrt(pi)) *
(dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]^2 +
(psi - mymu) *
- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)[, 3]) / sigma^2
+ dpsi.dlambda.yjn(psi, lambda, mymu,
+ sigma, derivative = 2)[, 3]) / sigma^2
}
}
-glag.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+glag.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
discontinuity = -mymu / (sqrt(2) * sigma)
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2])
@@ -529,11 +550,12 @@ glag.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
psi = mymu + sqrt(2) * sigma * z
(1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
(1 / sqrt(pi)) *
- (- dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]) / sigma^2
+ (- dpsi.dlambda.yjn(psi, lambda, mymu,
+ sigma, derivative = 1)[, 2]) / sigma^2
}
}
-glag.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+glag.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2]) * sqrt(8) * z
} else {
@@ -541,13 +563,14 @@ glag.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat = NULL) {
discontinuity = -mymu / (sqrt(2) * sigma)
(1 / (2 * sqrt((z-discontinuity^2)^2 + discontinuity^2))) *
(1 / sqrt(pi)) *
- (-2 * dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 1)[, 2]) *
+ (-2 * dpsi.dlambda.yjn(psi, lambda, mymu,
+ sigma, derivative = 1)[, 2]) *
(psi - mymu) / sigma^3
}
}
-gleg.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gleg.weight.yjn.11 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
@@ -564,7 +587,7 @@ gleg.weight.yjn.11 = function(z, lambda, mymu, sigma, derivmat = NULL) {
}
}
-gleg.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gleg.weight.yjn.12 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (- derivmat[, 2])
} else {
@@ -575,7 +598,7 @@ gleg.weight.yjn.12 = function(z, lambda, mymu, sigma, derivmat = NULL) {
}
}
-gleg.weight.yjn.13 = function(z, lambda, mymu, sigma, derivmat = NULL) {
+gleg.weight.yjn.13 <- function(z, lambda, mymu, sigma, derivmat = NULL) {
if (length(derivmat)) {
derivmat[, 4] * (-derivmat[, 2]) * sqrt(8) * z
} else {
@@ -593,122 +616,130 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
list(save.weight=save.weight)
}
- lms.yjn2 = function(percentiles = c(25, 50, 75),
- zero = c(1,3),
- llambda = "identity",
- lmu = "identity",
- lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
- dfmu.init=4,
- dfsigma.init = 2,
- ilambda=1.0,
- isigma = NULL,
- yoffset = NULL,
- nsimEIM = 250)
+ lms.yjn2 <- function(percentiles = c(25, 50, 75),
+ zero = c(1, 3),
+ llambda = "identity",
+ lmu = "identity",
+ lsigma = "loge",
+ dfmu.init=4,
+ dfsigma.init = 2,
+ ilambda=1.0,
+ isigma = NULL,
+ yoffset = NULL,
+ nsimEIM = 250)
{
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- if (!is.list(elambda)) elambda = list()
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
- if (!is.Numeric(ilambda))
- stop("bad input for argument 'ilambda'")
- if (length(isigma) &&
- !is.Numeric(isigma, positive = TRUE))
- stop("bad input for argument 'isigma'")
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
- new("vglmff",
- blurb = c("LMS Quantile Regression (Yeo-Johnson transformation",
- " to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", ",
- namesof("mu", link = lmu, earg = emu),
- ", ",
- namesof("sigma", link = lsigma, earg = esigma)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
- namesof("mu", .lmu, earg = .emu, short= TRUE),
- namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
-
- y.save = y
- yoff = if (is.Numeric( .yoffset)) .yoffset else -median(y)
- extra$yoffset = yoff
- y = y + yoff
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
- if (!length(etastart)) {
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.
- y.tx = yeo.johnson(y, lambda.init)
- fv.init =
- if (smoothok <-
- (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
- fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y=y.tx, w = w, df = .dfmu.init)
- c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
- } else {
- rep(weighted.mean(y, w), length.out = n)
- }
- sigma.init = if (!is.Numeric(.isigma)) {
- if (is.Numeric( .dfsigma.init) && smoothok) {
- fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = (y.tx - fv.init)^2,
- w = w, df = .dfsigma.init)
- sqrt(c(abs(predict(fit710,
- x = x[, min(ncol(x), 2)])$y)))
- } else {
- sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
- }
- } else
- .isigma
-
- etastart = matrix(0, n, 3)
- etastart[, 1] = theta2eta(lambda.init, .llambda, earg = .elambda)
- etastart[, 2] = theta2eta(fv.init, .lmu, earg = .emu)
- etastart[, 3] = theta2eta(sigma.init, .lsigma, earg = .esigma)
+ if (!is.Numeric(ilambda))
+ stop("bad input for argument 'ilambda'")
+ if (length(isigma) &&
+ !is.Numeric(isigma, positive = TRUE))
+ stop("bad input for argument 'isigma'")
+
+ new("vglmff",
+ blurb = c("LMS Quantile Regression (Yeo-Johnson transformation",
+ " to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda),
+ ", ",
+ namesof("mu", link = lmu, earg = emu),
+ ", ",
+ namesof("sigma", link = lsigma, earg = esigma)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero = zero))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+ predictors.names <-
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ namesof("mu", .lmu, earg = .emu, short= TRUE),
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
+
+ y.save = y
+ yoff = if (is.Numeric( .yoffset)) .yoffset else -median(y)
+ extra$yoffset = yoff
+ y = y + yoff
+
+ if (!length(etastart)) {
+ lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.
+
+ y.tx = yeo.johnson(y, lambda.init)
+ fv.init =
+ if (smoothok <-
+ (length(unique(sort(x[, min(ncol(x), 2)]))) > 7)) {
+ fit700 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y=y.tx, w = w, df = .dfmu.init)
+ c(predict(fit700, x = x[, min(ncol(x), 2)])$y)
+ } else {
+ rep(weighted.mean(y, w), length.out = n)
}
- }), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
- .elambda = elambda, .emu = emu, .esigma = esigma,
- .dfmu.init = dfmu.init,
- .dfsigma.init = dfsigma.init,
- .ilambda = ilambda,
- .yoffset=yoffset,
- .isigma = isigma))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- qtplot.lms.yjn(percentiles = .percentiles, eta = eta,
- yoffset = extra$yoff)
- }, list(.percentiles = percentiles,
- .esigma = esigma, .elambda = elambda,
- .llambda = llambda,
- .lsigma = lsigma))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$nsimEIM = .nsimEIM
- misc$percentiles = .percentiles
- misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
- misc$true.mu = FALSE # $fitted is not a true mu
- misc[["yoffset"]] = extra$yoffset
-
- y = y.save # Restore back the value; to be attached to object
-
- if (control$cdf) {
+
+ sigma.init = if (!is.Numeric(.isigma)) {
+ if (is.Numeric( .dfsigma.init) && smoothok) {
+ fit710 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = (y.tx - fv.init)^2,
+ w = w, df = .dfsigma.init)
+ sqrt(c(abs(predict(fit710,
+ x = x[, min(ncol(x), 2)])$y)))
+ } else {
+ sqrt( sum( w * (y.tx - fv.init)^2 ) / sum(w) )
+ }
+ } else
+ .isigma
+
+ etastart = matrix(0, n, 3)
+ etastart[, 1] = theta2eta(lambda.init, .llambda, earg = .elambda)
+ etastart[, 2] = theta2eta(fv.init, .lmu, earg = .emu)
+ etastart[, 3] = theta2eta(sigma.init, .lsigma, earg = .esigma)
+
+ }
+ }), list(.llambda = llambda, .lmu = lmu, .lsigma = lsigma,
+ .elambda = elambda, .emu = emu, .esigma = esigma,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda,
+ .yoffset=yoffset,
+ .isigma = isigma))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ qtplot.lms.yjn(percentiles = .percentiles, eta = eta,
+ yoffset = extra$yoff)
+ }, list(.percentiles = percentiles,
+ .esigma = esigma, .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
+ last = eval(substitute(expression({
+ misc$link = c(lambda = .llambda, mu = .lmu, sigma = .lsigma)
+ misc$earg = list(lambda = .elambda, mu = .emu, sigma = .esigma)
+
+ misc$expected = TRUE
+ misc$nsimEIM = .nsimEIM
+ misc$percentiles = .percentiles
+
+ misc$true.mu = FALSE # $fitted is not a true mu
+ misc[["yoffset"]] = extra$yoffset
+
+ y = y.save # Restore back the value; to be attached to object
+
+ if (control$cdf) {
post$cdf = cdf.lms.yjn(y + misc$yoffset,
eta0=matrix(c(lambda,mymu,sigma),
ncol=3, dimnames = list(dimnames(x)[[1]], NULL)))
@@ -725,70 +756,69 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
psi = yeo.johnson(y, lambda)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
+ sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
(lambda-1) * sign(y) * log1p(abs(y))))
}, list( .elambda = elambda, .emu = emu, .esigma = esigma,
.llambda = llambda, .lmu = lmu,
.lsigma = lsigma ))),
- vfamily = c("lms.yjn2", "lmscreg"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
-
- psi = yeo.johnson(y, lambda)
- d1 = yeo.johnson(y, lambda, deriv = 1)
+ vfamily = c("lms.yjn2", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu = eta2theta(eta[, 2], .lmu, earg = .emu)
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dmu.deta = dtheta.deta(mymu, link = .lmu, earg = .emu)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
+ psi = yeo.johnson(y, lambda)
+ d1 = yeo.johnson(y, lambda, deriv = 1)
+ AA = (psi - mymu) / sigma
+ dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
+ dl.dmu = AA / sigma
+ dl.dsigma = (AA^2 -1) / sigma
+ dthetas.detas = cbind(dlambda.deta, dmu.deta, dsigma.deta)
+ c(w) * cbind(dl.dlambda, dl.dmu, dl.dsigma) * dthetas.detas
+ }), list( .elambda = elambda, .emu = emu, .esigma = esigma,
+ .llambda = llambda, .lmu = lmu,
+ .lsigma = lsigma ))),
+ weight = eval(substitute(expression({
+
+
+ run.varcov = 0
+ ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ psi = rnorm(n, mymu, sigma)
+ ysim = yeo.johnson(y=psi, lam=lambda, inv = TRUE)
+ d1 = yeo.johnson(ysim, lambda, deriv = 1)
AA = (psi - mymu) / sigma
- dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
+ dl.dlambda = -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
dl.dmu = AA / sigma
dl.dsigma = (AA^2 -1) / sigma
- dthetas.detas = cbind(dlambda.deta, dmu.deta, dsigma.deta)
- c(w) * cbind(dl.dlambda, dl.dmu, dl.dsigma) * dthetas.detas
- }), list( .elambda = elambda, .emu = emu, .esigma = esigma,
- .llambda = llambda, .lmu = lmu,
- .lsigma = lsigma ))),
- weight = eval(substitute(expression({
-
-
- run.varcov = 0
- ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
- for(ii in 1:( .nsimEIM )) {
- psi = rnorm(n, mymu, sigma)
- ysim = yeo.johnson(y=psi, lam=lambda, inv = TRUE)
- d1 = yeo.johnson(ysim, lambda, deriv = 1)
- AA = (psi - mymu) / sigma
- dl.dlambda = -AA * d1 /sigma + sign(ysim) * log1p(abs(ysim))
- dl.dmu = AA / sigma
- dl.dsigma = (AA^2 -1) / sigma
- rm(ysim)
- temp3 = cbind(dl.dlambda, dl.dmu, dl.dsigma)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
+ rm(ysim)
+ temp3 = cbind(dl.dlambda, dl.dmu, dl.dsigma)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
if (intercept.only)
run.varcov = matrix(colMeans(run.varcov),
nr=n, nc=ncol(run.varcov), byrow = TRUE)
- wz = run.varcov * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- dimnames(wz) = list(rownames(wz), NULL) # Remove the colnames
- c(w) * wz
- }), list(.lsigma = lsigma,
- .esigma = esigma, .elambda = elambda,
- .nsimEIM=nsimEIM,
- .llambda = llambda))))
+ wz = run.varcov * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ dimnames(wz) = list(rownames(wz), NULL) # Remove the colnames
+ c(w) * wz
+ }), list(.lsigma = lsigma,
+ .esigma = esigma, .elambda = elambda,
+ .nsimEIM=nsimEIM,
+ .llambda = llambda))))
}
lms.yjn <- function(percentiles = c(25, 50, 75),
- zero = c(1,3),
+ zero = c(1, 3),
llambda = "identity",
lsigma = "loge",
- elambda = list(), esigma = list(),
dfmu.init=4,
dfsigma.init = 2,
ilambda=1.0,
@@ -800,43 +830,51 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (!is.list(elambda)) elambda = list()
- if (!is.list(esigma)) esigma = list()
- rule = rule[1] # Number of points (common) for all the quadrature schemes
- if (rule != 5 && rule != 10)
- stop("only rule=5 or 10 is supported")
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- new("vglmff",
- blurb = c("LMS Quantile Regression ",
- "(Yeo-Johnson transformation to normality)\n",
- "Links: ",
- namesof("lambda", link = llambda, earg = elambda),
- ", mu, ",
- namesof("sigma", link = lsigma, earg = esigma)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list(.zero=zero))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
+
+
+
+ rule = rule[1] # Number of points (common) for all the quadrature schemes
+ if (rule != 5 && rule != 10)
+ stop("only rule=5 or 10 is supported")
+
+ new("vglmff",
+ blurb = c("LMS Quantile Regression ",
+ "(Yeo-Johnson transformation to normality)\n",
+ "Links: ",
+ namesof("lambda", link = llambda, earg = elambda),
+ ", mu, ",
+ namesof("sigma", link = lsigma, earg = esigma)),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list(.zero = zero))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("lambda", .llambda, earg = .elambda, short= TRUE),
"mu",
- namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
+ namesof("sigma", .lsigma, earg = .esigma, short= TRUE))
- y.save = y
- yoff = if (is.Numeric( .yoffset )) .yoffset else -median(y)
- extra$yoffset = yoff
- y = y + yoff
+ y.save = y
+ yoff = if (is.Numeric( .yoffset )) .yoffset else -median(y)
+ extra$yoffset = yoff
+ y = y + yoff
- if (!length(etastart)) {
+ if (!length(etastart)) {
- lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
+ lambda.init = if (is.Numeric( .ilambda )) .ilambda else 1.0
y.tx = yeo.johnson(y, lambda.init)
if (smoothok <-
@@ -868,229 +906,233 @@ lms.yjn2.control <- function(save.weight = TRUE, ...)
theta2eta(sigma.init, .lsigma, earg = .esigma))
}
- }), list(.lsigma = lsigma,
- .llambda = llambda,
- .esigma = esigma, .elambda = elambda,
- .dfmu.init = dfmu.init,
- .dfsigma.init = dfsigma.init,
- .ilambda = ilambda,
- .yoffset=yoffset,
- .isigma = isigma))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
- eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- qtplot.lms.yjn(percentiles = .percentiles,
- eta = eta, yoffset = extra$yoff)
- }, list(.percentiles = percentiles,
- .esigma = esigma,
- .elambda = elambda,
- .llambda = llambda,
- .lsigma = lsigma))),
- last = eval(substitute(expression({
- misc$percentiles = .percentiles
- misc$link = c(lambda = .llambda, mu = "identity", sigma = .lsigma)
- misc$earg = list(lambda = .elambda, mu = list(), sigma = .esigma)
- misc$true.mu = FALSE # $fitted is not a true mu
- misc[["yoffset"]] = extra$yoff
-
- y = y.save # Restore back the value; to be attached to object
-
- if (control$cdf) {
- post$cdf =
- cdf.lms.yjn(y + misc$yoffset,
- eta0 = matrix(c(lambda,mymu,sigma),
- ncol = 3,
- dimnames = list(dimnames(x)[[1]], NULL)))
- }
- }), list(.percentiles = percentiles,
- .esigma = esigma, .elambda = elambda,
- .llambda = llambda,
- .lsigma = lsigma))),
- loglikelihood = eval(substitute(
- function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mu = eta[, 2]
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
- psi = yeo.johnson(y, lambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
- (lambda-1) * sign(y) * log1p(abs(y))))
- }, list( .esigma = esigma, .elambda = elambda,
- .lsigma = lsigma, .llambda = llambda))),
- vfamily = c("lms.yjn", "lmscreg"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- mymu = eta[, 2]
- sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ }), list(.lsigma = lsigma,
+ .llambda = llambda,
+ .esigma = esigma, .elambda = elambda,
+ .dfmu.init = dfmu.init,
+ .dfsigma.init = dfsigma.init,
+ .ilambda = ilambda,
+ .yoffset=yoffset,
+ .isigma = isigma))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta[, 1] = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ eta[, 3] = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ qtplot.lms.yjn(percentiles = .percentiles,
+ eta = eta, yoffset = extra$yoff)
+ }, list(.percentiles = percentiles,
+ .esigma = esigma,
+ .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
+ last = eval(substitute(expression({
+ misc$link = c(lambda = .llambda, mu = "identity",
+ sigma = .lsigma)
- psi = yeo.johnson(y, lambda)
- d1 = yeo.johnson(y, lambda, deriv = 1)
- AA = (psi - mymu) / sigma
+ misc$earg = list(lambda = .elambda, mu = list(theta = NULL),
+ sigma = .esigma)
- dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
- dl.dmu = AA / sigma
- dl.dsigma = (AA^2 -1) / sigma
- dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
- dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
-
- cbind(dl.dlambda * dlambda.deta,
- dl.dmu,
- dl.dsigma * dsigma.deta) * w
- }), list( .esigma = esigma, .elambda = elambda,
- .lsigma = lsigma, .llambda = llambda ))),
- weight = eval(substitute(expression({
- wz = matrix(0, n, 6)
-
-
- wz[,iam(2,2,M)] = 1 / sigma^2
- wz[,iam(3,3,M)] = 2 * wz[,iam(2,2,M)] # 2 / sigma^2
-
-
- if (.rule == 10) {
- glag.abs = c(0.13779347054,0.729454549503,
- 1.80834290174,3.40143369785,
- 5.55249614006,8.33015274676,
- 11.8437858379,16.2792578314,
- 21.996585812, 29.9206970123)
- glag.wts = c(0.308441115765, 0.401119929155, 0.218068287612,
- 0.0620874560987, 0.00950151697517, 0.000753008388588,
- 2.82592334963e-5,
- 4.24931398502e-7, 1.83956482398e-9, 9.91182721958e-13)
- } else {
- glag.abs = c(0.2635603197180449, 1.4134030591060496,
- 3.5964257710396850,
- 7.0858100058570503, 12.6408008442729685)
- glag.wts = c(5.217556105826727e-01,3.986668110832433e-01,
- 7.594244968176882e-02,
- 3.611758679927785e-03, 2.336997238583738e-05)
- }
+ misc$percentiles = .percentiles
+ misc$true.mu = FALSE # $fitted is not a true mu
+ misc[["yoffset"]] = extra$yoff
- if (.rule == 10) {
- sgh.abs = c(0.03873852801690856, 0.19823332465268367,
- 0.46520116404433082,
- 0.81686197962535023, 1.23454146277833154,
- 1.70679833036403172,
- 2.22994030591819214, 2.80910399394755972,
- 3.46387269067033854,
- 4.25536209637269280)
- sgh.wts = c(9.855210713854302e-02,2.086780884700499e-01,
- 2.520517066468666e-01,
- 1.986843323208932e-01,9.719839905023238e-02,
- 2.702440190640464e-02,
- 3.804646170194185e-03, 2.288859354675587e-04,
- 4.345336765471935e-06,
- 1.247734096219375e-08)
- } else {
- sgh.abs = c(0.1002421519682381, 0.4828139660462573,
- 1.0609498215257607,
- 1.7797294185202606, 2.6697603560875995)
- sgh.wts = c(0.2484061520284881475,0.3923310666523834311,
- 0.2114181930760276606,
- 0.0332466603513424663, 0.0008248533445158026)
- }
+ y = y.save # Restore back the value; to be attached to object
- if (.rule == 10) {
- gleg.abs = c(-0.973906528517, -0.865063366689, -0.679409568299,
- -0.433395394129, -0.148874338982)
- gleg.abs = c(gleg.abs, rev(-gleg.abs))
- gleg.wts = c(0.0666713443087, 0.149451349151, 0.219086362516,
- 0.26926671931, 0.295524224715)
- gleg.wts = c(gleg.wts, rev(gleg.wts))
- } else {
- gleg.abs = c(-0.9061798459386643,-0.5384693101056820, 0,
- 0.5384693101056828, 0.9061798459386635)
- gleg.wts = c(0.2369268850561853,0.4786286704993680,
- 0.5688888888888889,
- 0.4786286704993661, 0.2369268850561916)
- }
+ if (control$cdf) {
+ post$cdf =
+ cdf.lms.yjn(y + misc$yoffset,
+ eta0 = matrix(c(lambda,mymu,sigma),
+ ncol = 3,
+ dimnames = list(dimnames(x)[[1]], NULL)))
+ }
+ }), list(.percentiles = percentiles,
+ .esigma = esigma, .elambda = elambda,
+ .llambda = llambda,
+ .lsigma = lsigma))),
+ loglikelihood = eval(substitute(
+ function(mu,y, w, residuals= FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mu = eta[, 2]
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+ psi = yeo.johnson(y, lambda)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (-log(sigma) - 0.5 * ((psi-mu)/sigma)^2 +
+ (lambda-1) * sign(y) * log1p(abs(y))))
+ }, list( .esigma = esigma, .elambda = elambda,
+ .lsigma = lsigma, .llambda = llambda))),
+ vfamily = c("lms.yjn", "lmscreg"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
+ mymu = eta[, 2]
+ sigma = eta2theta(eta[, 3], .lsigma, earg = .esigma)
+
+ psi = yeo.johnson(y, lambda)
+ d1 = yeo.johnson(y, lambda, deriv = 1)
+ AA = (psi - mymu) / sigma
+
+ dl.dlambda = -AA * d1 /sigma + sign(y) * log1p(abs(y))
+ dl.dmu = AA / sigma
+ dl.dsigma = (AA^2 -1) / sigma
+ dlambda.deta = dtheta.deta(lambda, link = .llambda, earg = .elambda)
+ dsigma.deta = dtheta.deta(sigma, link = .lsigma, earg = .esigma)
+
+ cbind(dl.dlambda * dlambda.deta,
+ dl.dmu,
+ dl.dsigma * dsigma.deta) * c(w)
+ }), list( .esigma = esigma, .elambda = elambda,
+ .lsigma = lsigma, .llambda = llambda ))),
+ weight = eval(substitute(expression({
+ wz = matrix(0, n, 6)
- discontinuity = -mymu/(sqrt(2)*sigma)
+ wz[,iam(2, 2, M)] = 1 / sigma^2
+ wz[,iam(3, 3, M)] = 2 * wz[,iam(2, 2, M)] # 2 / sigma^2
- LL = pmin(discontinuity, 0)
- UU = pmax(discontinuity, 0)
- if (FALSE) {
- AA = (UU-LL)/2
- for(kk in 1:length(gleg.wts)) {
- temp1 = AA * gleg.wts[kk]
- abscissae = (UU+LL)/2 + AA * gleg.abs[kk]
- psi = mymu + sqrt(2) * sigma * abscissae
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
- derivative = 2)
- temp9 = cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
-
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp1 *
- gleg.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + temp1 *
- gleg.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + temp1 *
- gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
- }
+ if (.rule == 10) {
+ glag.abs = c(0.13779347054,0.729454549503,
+ 1.80834290174,3.40143369785,
+ 5.55249614006,8.33015274676,
+ 11.8437858379,16.2792578314,
+ 21.996585812, 29.9206970123)
+ glag.wts = c(0.308441115765, 0.401119929155, 0.218068287612,
+ 0.0620874560987, 0.00950151697517, 0.000753008388588,
+ 2.82592334963e-5,
+ 4.24931398502e-7, 1.83956482398e-9, 9.91182721958e-13)
+ } else {
+ glag.abs = c(0.2635603197180449, 1.4134030591060496,
+ 3.5964257710396850,
+ 7.0858100058570503, 12.6408008442729685)
+ glag.wts = c(5.217556105826727e-01, 3.986668110832433e-01,
+ 7.594244968176882e-02,
+ 3.611758679927785e-03, 2.336997238583738e-05)
+ }
+
+ if (.rule == 10) {
+ sgh.abs = c(0.03873852801690856, 0.19823332465268367,
+ 0.46520116404433082,
+ 0.81686197962535023, 1.23454146277833154,
+ 1.70679833036403172,
+ 2.22994030591819214, 2.80910399394755972,
+ 3.46387269067033854,
+ 4.25536209637269280)
+ sgh.wts = c(9.855210713854302e-02, 2.086780884700499e-01,
+ 2.520517066468666e-01,
+ 1.986843323208932e-01,9.719839905023238e-02,
+ 2.702440190640464e-02,
+ 3.804646170194185e-03, 2.288859354675587e-04,
+ 4.345336765471935e-06,
+ 1.247734096219375e-08)
+ } else {
+ sgh.abs = c(0.1002421519682381, 0.4828139660462573,
+ 1.0609498215257607,
+ 1.7797294185202606, 2.6697603560875995)
+ sgh.wts = c(0.2484061520284881475,0.3923310666523834311,
+ 0.2114181930760276606,
+ 0.0332466603513424663, 0.0008248533445158026)
+ }
+
+ if (.rule == 10) {
+ gleg.abs = c(-0.973906528517, -0.865063366689, -0.679409568299,
+ -0.433395394129, -0.148874338982)
+ gleg.abs = c(gleg.abs, rev(-gleg.abs))
+ gleg.wts = c(0.0666713443087, 0.149451349151, 0.219086362516,
+ 0.26926671931, 0.295524224715)
+ gleg.wts = c(gleg.wts, rev(gleg.wts))
+ } else {
+ gleg.abs = c(-0.9061798459386643,-0.5384693101056820, 0,
+ 0.5384693101056828, 0.9061798459386635)
+ gleg.wts = c(0.2369268850561853,0.4786286704993680,
+ 0.5688888888888889,
+ 0.4786286704993661, 0.2369268850561916)
+ }
+
+
+ discontinuity = -mymu/(sqrt(2)*sigma)
+
+
+ LL = pmin(discontinuity, 0)
+ UU = pmax(discontinuity, 0)
+ if (FALSE) {
+ AA = (UU-LL)/2
+ for(kk in 1:length(gleg.wts)) {
+ temp1 = AA * gleg.wts[kk]
+ abscissae = (UU+LL)/2 + AA * gleg.abs[kk]
+ psi = mymu + sqrt(2) * sigma * abscissae
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+ derivative = 2)
+ temp9 = cbind(temp9, exp(-abscissae^2) / (sqrt(pi) * sigma^2))
+
+ wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + temp1 *
+ gleg.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + temp1 *
+ gleg.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + temp1 *
+ gleg.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
} else {
- temp9 = dotFortran(name = "yjngintf", as.double(LL),
- as.double(UU),
- as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
- as.integer(length(gleg.abs)), as.double(lambda),
- as.double(mymu), as.double(sigma), answer=double(3*n),
+ temp9 = dotFortran(name = "yjngintf", as.double(LL),
+ as.double(UU),
+ as.double(gleg.abs), as.double(gleg.wts), as.integer(n),
+ as.integer(length(gleg.abs)), as.double(lambda),
+ as.double(mymu), as.double(sigma), answer=double(3*n),
eps=as.double(1.0e-5))$ans
dim(temp9) = c(3,n)
- wz[,iam(1,1,M)] = temp9[1,]
- wz[,iam(1,2,M)] = temp9[2,]
- wz[,iam(1,3,M)] = temp9[3,]
+ wz[,iam(1, 1, M)] = temp9[1,]
+ wz[,iam(1, 2, M)] = temp9[2,]
+ wz[,iam(1, 3, M)] = temp9[3,]
}
- for(kk in 1:length(sgh.wts)) {
+ for(kk in 1:length(sgh.wts)) {
- abscissae = sign(-discontinuity) * sgh.abs[kk]
- psi = mymu + sqrt(2) * sigma * abscissae # abscissae = z
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
- derivative = 2)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + sgh.wts[kk] *
- gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + sgh.wts[kk] *
- gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + sgh.wts[kk] *
- gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
- }
+ abscissae = sign(-discontinuity) * sgh.abs[kk]
+ psi = mymu + sqrt(2) * sigma * abscissae # abscissae = z
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma,
+ derivative = 2)
+ wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + sgh.wts[kk] *
+ gh.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + sgh.wts[kk] *
+ gh.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + sgh.wts[kk] *
+ gh.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
- temp1 = exp(-discontinuity^2)
- for(kk in 1:length(glag.wts)) {
- abscissae = sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
- psi = mymu + sqrt(2) * sigma * abscissae
- temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
- temp9 = cbind(temp9,
- 1 / (2 * sqrt((abscissae-discontinuity^2)^2 +
- discontinuity^2) *
- sqrt(pi) * sigma^2))
- temp7 = temp1 * glag.wts[kk]
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] + temp7 *
- glag.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] + temp7 *
- glag.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
- wz[,iam(1,3,M)] = wz[,iam(1,3,M)] + temp7 *
- glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
- }
+ temp1 = exp(-discontinuity^2)
+ for(kk in 1:length(glag.wts)) {
+ abscissae = sign(discontinuity) * sqrt(glag.abs[kk]) + discontinuity^2
+ psi = mymu + sqrt(2) * sigma * abscissae
+ temp9 = dpsi.dlambda.yjn(psi, lambda, mymu, sigma, derivative = 2)
+ temp9 = cbind(temp9,
+ 1 / (2 * sqrt((abscissae-discontinuity^2)^2 +
+ discontinuity^2) *
+ sqrt(pi) * sigma^2))
+ temp7 = temp1 * glag.wts[kk]
+ wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] + temp7 *
+ glag.weight.yjn.11(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] + temp7 *
+ glag.weight.yjn.12(abscissae, lambda, mymu, sigma, temp9)
+ wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] + temp7 *
+ glag.weight.yjn.13(abscissae, lambda, mymu, sigma, temp9)
+ }
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] * dlambda.deta^2
- wz[,iam(1,2,M)] = wz[,iam(1,2,M)] * dlambda.deta
- wz[,iam(1,3,M)] = wz[,iam(1,3,M)] * dsigma.deta * dlambda.deta
- if ( .diagW && iter <= .iters.diagW) {
- wz[,iam(1,2,M)] = wz[,iam(1,3,M)] = 0
- }
- wz[,iam(2,3,M)] = wz[,iam(2,3,M)] * dsigma.deta
- wz[,iam(3,3,M)] = wz[,iam(3,3,M)] * dsigma.deta^2
+ wz[,iam(1, 1, M)] = wz[,iam(1, 1, M)] * dlambda.deta^2
+ wz[,iam(1, 2, M)] = wz[,iam(1, 2, M)] * dlambda.deta
+ wz[,iam(1, 3, M)] = wz[,iam(1, 3, M)] * dsigma.deta * dlambda.deta
+ if ( .diagW && iter <= .iters.diagW) {
+ wz[,iam(1, 2, M)] = wz[,iam(1, 3, M)] = 0
+ }
+ wz[,iam(2, 3, M)] = wz[,iam(2, 3, M)] * dsigma.deta
+ wz[,iam(3, 3, M)] = wz[,iam(3, 3, M)] * dsigma.deta^2
c(w) * wz
- }), list(.lsigma = lsigma,
- .esigma = esigma, .elambda = elambda,
- .rule=rule,
- .diagW=diagW,
- .iters.diagW=iters.diagW,
- .llambda = llambda))))
+ }), list(.lsigma = lsigma,
+ .esigma = esigma, .elambda = elambda,
+ .rule=rule,
+ .diagW=diagW,
+ .iters.diagW=iters.diagW,
+ .llambda = llambda))))
}
@@ -1115,32 +1157,33 @@ Wr1 <- function(r, w) ifelse(r <= 0, 1, w)
Wr2 <- function(r, w) (r <= 0) * 1 + (r > 0) * w
-amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+amlnormal.deviance <- function(mu, y, w, residuals = FALSE,
+ eta, extra = NULL) {
- M <- length(extra$w.aml)
+ M <- length(extra$w.aml)
- if (M > 1) y = matrix(y, extra$n, extra$M)
+ if (M > 1) y = matrix(y, extra$n, extra$M)
- devi = cbind((y - mu)^2)
- if (residuals) {
- stop("not sure here")
- wz = VGAM.weights.function(w = w, M = extra$M, n = extra$n)
- return((y - mu) * sqrt(wz) * matrix(extra$w.aml,extra$n,extra$M))
- } else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- for(ii in 1:M)
- all.deviances[ii] = sum(w * devi[,ii] *
- Wr1(myresid[,ii], w=extra$w.aml[ii]))
- }
- if (is.logical(extra$individual) && extra$individual)
- all.deviances else sum(all.deviances)
+ devi = cbind((y - mu)^2)
+ if (residuals) {
+ stop("not sure here")
+ wz = VGAM.weights.function(w = w, M = extra$M, n = extra$n)
+ return((y - mu) * sqrt(wz) * matrix(extra$w.aml,extra$n,extra$M))
+ } else {
+ all.deviances = numeric(M)
+ myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ for(ii in 1:M)
+ all.deviances[ii] = sum(c(w) * devi[, ii] *
+ Wr1(myresid[, ii], w=extra$w.aml[ii]))
+ }
+ if (is.logical(extra$individual) && extra$individual)
+ all.deviances else sum(all.deviances)
}
amlnormal <- function(w.aml = 1, parallel = FALSE,
- lexpectile = "identity", eexpectile = list(),
+ lexpectile = "identity",
iexpectile = NULL,
imethod = 1, digw = 4)
{
@@ -1153,20 +1196,22 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
imethod > 3)
stop("argument 'imethod' must be 1, 2 or 3")
- if (mode(lexpectile) != "character" && mode(lexpectile) != "name")
- lexpectile = as.character(substitute(lexpectile))
- if (!is.list(eexpectile)) eexpectile = list()
+
+ lexpectile <- as.list(substitute(lexpectile))
+ eexpectile <- link2list(lexpectile)
+ lexpectile <- attr(eexpectile, "function.name")
+
if (length(iexpectile) && !is.Numeric(iexpectile))
- stop("bad input for argument 'iexpectile'")
+ stop("bad input for argument 'iexpectile'")
new("vglmff",
blurb = c("Asymmetric least squares quantile regression\n\n",
"Links: ",
namesof("expectile", link = lexpectile, earg = eexpectile)),
constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
+ constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
}), list( .parallel = parallel ))),
deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
amlnormal.deviance(mu = mu, y = y, w = w, residuals = residuals,
@@ -1174,16 +1219,25 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
},
initialize = eval(substitute(expression({
extra$w.aml = .w.aml
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
- predictors.names = c(namesof(
- paste("expectile(",y.names,")", sep = ""), .lexpectile,
+ predictors.names <- c(namesof(
+ paste("expectile(",y.names,")", sep = ""), .lexpectile ,
earg = .eexpectile, tag = FALSE))
if (!length(etastart)) {
@@ -1206,24 +1260,29 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
linkinv = eval(substitute(function(eta, extra = NULL) {
ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- ans[,ii] = eta2theta(eta[,ii], .lexpectile, earg = .eexpectile)
+ ans[, ii] = eta2theta(eta[, ii], .lexpectile, earg = .eexpectile)
dimnames(ans) = list(dimnames(eta)[[1]], extra$y.names)
ans
}, list( .lexpectile = lexpectile, .eexpectile = eexpectile ))),
last = eval(substitute(expression({
misc$link = rep(.lexpectile, length = M)
names(misc$link) = extra$y.names
+
misc$earg = vector("list", M)
+ for (ilocal in 1:M)
+ misc$earg[[ilocal]] <- list(theta = NULL)
names(misc$earg) = names(misc$link)
misc$parallel = .parallel
misc$expected = TRUE
extra$percentile = numeric(M)
+ misc$multipleResponses <- TRUE
+
for(ii in 1:M) {
use.w = if (M > 1 && ncol(cbind(w)) == M) w[, ii] else w
extra$percentile[ii] = 100 *
- weighted.mean(myresid[,ii] <= 0, use.w)
+ weighted.mean(myresid[, ii] <= 0, use.w)
}
names(extra$percentile) = names(misc$link)
@@ -1236,6 +1295,7 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
}), list( .lexpectile = lexpectile,
.eexpectile = eexpectile, .parallel = parallel ))),
vfamily = c("amlnormal"),
+
deriv = eval(substitute(expression({
mymu = eta2theta(eta, .lexpectile, earg = .eexpectile)
dexpectile.deta = dtheta.deta(mymu, .lexpectile, earg = .eexpectile)
@@ -1245,6 +1305,7 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
c(w) * myresid * wor1 * dexpectile.deta
}), list( .lexpectile = lexpectile,
.eexpectile = eexpectile ))),
+
weight = eval(substitute(expression({
wz = c(w) * wor1 * dexpectile.deta^2
wz
@@ -1261,7 +1322,7 @@ amlnormal.deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
-amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta,
+amlpoisson.deviance <- function(mu, y, w, residuals = FALSE, eta,
extra = NULL) {
M <- length(extra$w.aml)
@@ -1278,8 +1339,8 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta,
} else {
all.deviances = numeric(M)
myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- for(ii in 1:M) all.deviances[ii] = 2 * sum(w * devi[,ii] *
- Wr1(myresid[,ii], w=extra$w.aml[ii]))
+ for(ii in 1:M) all.deviances[ii] = 2 * sum(c(w) * devi[, ii] *
+ Wr1(myresid[, ii], w=extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
all.deviances else sum(all.deviances)
@@ -1287,38 +1348,49 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta,
amlpoisson <- function(w.aml = 1, parallel = FALSE, imethod = 1,
- digw = 4, link = "loge", earg = list())
+ digw = 4, link = "loge")
{
- if (!is.Numeric(w.aml, positive = TRUE))
- stop("'w.aml' must be a vector of positive values")
-
- if (mode(link)!= "character" && mode(link)!= "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
-
- new("vglmff",
- blurb = c("Poisson expectile regression by",
- " asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg = earg)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
- }), list( .parallel = parallel ))),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- amlpoisson.deviance(mu = mu, y = y, w = w, residuals = residuals,
- eta = eta, extra = extra)
- },
- initialize = eval(substitute(expression({
- extra$w.aml = .w.aml
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
+ if (!is.Numeric(w.aml, positive = TRUE))
+ stop("'w.aml' must be a vector of positive values")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Poisson expectile regression by",
+ " asymmetric maximum likelihood estimation\n\n",
+ "Link: ", namesof("expectile", link, earg = earg)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ }), list( .parallel = parallel ))),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlpoisson.deviance(mu = mu, y = y, w = w, residuals = residuals,
+ eta = eta, extra = extra)
+ },
+ initialize = eval(substitute(expression({
+ extra$w.aml = .w.aml
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ extra$M = M = length(extra$w.aml) # Recycle if necessary
+ extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
extra$individual = FALSE
- predictors.names =
+ predictors.names <-
c(namesof(paste("expectile(",y.names,")", sep = ""),
- .link , earg = .earg, tag = FALSE))
+ .link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
mean.init = if ( .imethod == 2)
@@ -1329,59 +1401,66 @@ amlpoisson.deviance = function(mu, y, w, residuals = FALSE, eta,
abs(junk$fitted)
}
etastart =
- matrix(theta2eta(mean.init, .link , earg = .earg), n, M)
+ matrix(theta2eta(mean.init, .link , earg = .earg ), n, M)
}
}), list( .link = link, .earg = earg, .imethod = imethod,
.digw = digw, .w.aml = w.aml ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
mu.ans = eta = as.matrix(eta)
for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link , earg = .earg)
+ mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
mu.ans
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- misc$parallel = .parallel
- misc$expected = TRUE
- extra$percentile = numeric(M)
- for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[,ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$multipleResponses <- TRUE
+ misc$expected = TRUE
+ misc$parallel = .parallel
+
+
+ misc$link = rep(.link , length = M)
+ names(misc$link) = extra$y.names
+
+ misc$earg = vector("list", M)
+ for (ilocal in 1:M)
+ misc$earg[[ilocal]] <- list(theta = NULL)
+ names(misc$earg) = names(misc$link)
+
+ extra$percentile = numeric(M)
+ for(ii in 1:M)
+ extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) = names(misc$link)
extra$individual = TRUE
extra$deviance = amlpoisson.deviance(mu = mu, y = y, w = w,
residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
- }), list( .link = link, .earg = earg, .parallel = parallel ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link , earg = .earg)
- }, list( .link = link, .earg = earg ))),
- vfamily = c("amlpoisson"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg)
- dexpectile.deta = dtheta.deta(mymu, .link , earg = .earg)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
- byrow = TRUE))
- c(w) * myresid * wor1 * (dexpectile.deta / mymu)
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- use.mu = mymu
- use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
- wz = c(w) * wor1 * use.mu * (dexpectile.deta / mymu)^2
- wz
- }), list( .link = link, .earg = earg ))))
+ names(extra$deviance) = extra$y.names
+ }), list( .link = link, .earg = earg, .parallel = parallel ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlpoisson"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link , earg = .earg )
+ dexpectile.deta = dtheta.deta(mymu, .link , earg = .earg )
+ myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ byrow = TRUE))
+ c(w) * myresid * wor1 * (dexpectile.deta / mymu)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ use.mu = mymu
+ use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
+ wz = c(w) * wor1 * use.mu * (dexpectile.deta / mymu)^2
+ wz
+ }), list( .link = link, .earg = earg ))))
}
-amlbinomial.deviance = function(mu, y, w, residuals = FALSE,
+amlbinomial.deviance <- function(mu, y, w, residuals = FALSE,
eta, extra = NULL) {
M <- length(extra$w.aml)
@@ -1400,48 +1479,52 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE,
warning("fitted values close to 0 or 1")
smu <- mu[small]
sy <- y[small]
- smu <- ifelse(smu < .Machine$double.eps, .Machine$double.eps, smu)
+ smu <- ifelse(smu < .Machine$double.eps,
+ .Machine$double.eps, smu)
onemsmu <- ifelse((1 - smu) < .Machine$double.eps,
.Machine$double.eps, 1 - smu)
devmu[small] <- sy * log(smu) + (1 - sy) * log(onemsmu)
}
devi <- 2 * (devy - devmu)
if (residuals) {
- stop("not sure here")
- return(sign(y - mu) * sqrt(abs(devi) * w))
+ stop("not sure here")
+ return(sign(y - mu) * sqrt(abs(devi) * w))
} else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - matrix(mu,extra$n,extra$M)
- for(ii in 1:M) all.deviances[ii] = sum(w * devi[,ii] *
- Wr1(myresid[,ii], w=extra$w.aml[ii]))
+ all.deviances = numeric(M)
+ myresid = matrix(y,extra$n,extra$M) - matrix(mu,extra$n,extra$M)
+ for(ii in 1:M) all.deviances[ii] = sum(c(w) * devi[, ii] *
+ Wr1(myresid[, ii], w=extra$w.aml[ii]))
}
if (is.logical(extra$individual) && extra$individual)
- all.deviances else sum(all.deviances)
+ all.deviances else sum(all.deviances)
}
- amlbinomial <- function(w.aml = 1, parallel= FALSE, digw = 4,
- link = "logit", earg = list())
+ amlbinomial <- function(w.aml = 1, parallel = FALSE, digw = 4,
+ link = "logit")
{
- if (!is.Numeric(w.aml, positive = TRUE))
- stop("'w.aml' must be a vector of positive values")
- if (mode(link)!= "character" && mode(link)!= "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ if (!is.Numeric(w.aml, positive = TRUE))
+ stop("'w.aml' must be a vector of positive values")
+
- new("vglmff",
- blurb = c("Logistic expectile regression by ",
- "asymmetric maximum likelihood estimation\n\n",
- "Link: ", namesof("expectile", link, earg = earg)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
- }), list( .parallel = parallel ))),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- amlbinomial.deviance(mu = mu, y = y, w = w, residuals = residuals,
- eta = eta, extra = extra)
- },
- initialize = eval(substitute(expression({
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Logistic expectile regression by ",
+ "asymmetric maximum likelihood estimation\n\n",
+ "Link: ", namesof("expectile", link, earg = earg)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ }), list( .parallel = parallel ))),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlbinomial.deviance(mu = mu, y = y, w = w, residuals = residuals,
+ eta = eta, extra = extra)
+ },
+ initialize = eval(substitute(expression({
{
@@ -1477,62 +1560,67 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE,
extra$M = M = length(extra$w.aml) # Recycle if necessary
extra$n = n
extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
+ paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
extra$individual = FALSE
- predictors.names =
+ predictors.names <-
c(namesof(paste("expectile(", y.names, ")", sep = ""),
- .link , earg = .earg, tag = FALSE))
+ .link , earg = .earg , tag = FALSE))
if (!length(etastart)) {
- etastart = matrix(theta2eta(mustart, .link , earg = .earg), n, M)
+ etastart = matrix(theta2eta(mustart, .link , earg = .earg ), n, M)
mustart = NULL
}
- }), list( .link = link, .earg = earg,
- .digw = digw, .w.aml = w.aml ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu.ans = eta = as.matrix(eta)
- for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link , earg = .earg)
- dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
- mu.ans
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- misc$parallel = .parallel
- misc$expected = TRUE
- extra$percentile = numeric(M)
- for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[,ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
+ }), list( .link = link, .earg = earg,
+ .digw = digw, .w.aml = w.aml ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu.ans = eta = as.matrix(eta)
+ for(ii in 1:ncol(eta))
+ mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
+ dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
+ mu.ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = rep(.link , length = M)
+ names(misc$link) = extra$y.names
- extra$individual = TRUE
- extra$deviance = amlbinomial.deviance(mu = mu, y = y, w = w,
- residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
- }), list( .link = link, .earg = earg, .parallel = parallel ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link , earg = .earg)
- }, list( .link = link, .earg = earg ))),
- vfamily = c("amlbinomial"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg)
- use.mu = mymu
- use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
- dexpectile.deta = dtheta.deta(use.mu, .link , earg = .earg)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
- byrow = TRUE))
- c(w) * myresid * wor1 * (dexpectile.deta / (use.mu * (1-use.mu)))
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- wz = c(w) * wor1 * (dexpectile.deta^2 / (use.mu * (1 - use.mu)))
- wz
- }), list( .link = link, .earg = earg ))))
+ misc$earg = vector("list", M)
+ for (ilocal in 1:M)
+ misc$earg[[ilocal]] <- list(theta = NULL)
+ names(misc$earg) = names(misc$link)
+
+ misc$parallel = .parallel
+ misc$expected = TRUE
+
+ extra$percentile = numeric(M)
+ for(ii in 1:M)
+ extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) = names(misc$link)
+
+ extra$individual = TRUE
+ extra$deviance = amlbinomial.deviance(mu = mu, y = y, w = w,
+ residuals = FALSE, eta = eta, extra = extra)
+ names(extra$deviance) = extra$y.names
+ }), list( .link = link, .earg = earg, .parallel = parallel ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlbinomial"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link , earg = .earg )
+ use.mu = mymu
+ use.mu[use.mu < .Machine$double.eps^(3/4)] = .Machine$double.eps^(3/4)
+ dexpectile.deta = dtheta.deta(use.mu, .link , earg = .earg )
+ myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ byrow = TRUE))
+ c(w) * myresid * wor1 * (dexpectile.deta / (use.mu * (1-use.mu)))
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ wz = c(w) * wor1 * (dexpectile.deta^2 / (use.mu * (1 - use.mu)))
+ wz
+ }), list( .link = link, .earg = earg))))
}
@@ -1544,135 +1632,157 @@ amlbinomial.deviance = function(mu, y, w, residuals = FALSE,
-amlexponential.deviance = function(mu, y, w, residuals = FALSE,
+amlexponential.deviance <- function(mu, y, w, residuals = FALSE,
eta, extra = NULL) {
- M <- length(extra$w.aml)
+ M <- length(extra$w.aml)
- if (M > 1) y = matrix(y,extra$n,extra$M)
+ if (M > 1) y = matrix(y,extra$n,extra$M)
- devy = cbind(-log(y) - 1)
- devi = cbind(-log(mu) - y / mu)
- if (residuals) {
- stop("not sure here")
- return(sign(y - mu) * sqrt(2 * abs(devi) * w) *
- matrix(extra$w,extra$n,extra$M))
- } else {
- all.deviances = numeric(M)
- myresid = matrix(y,extra$n,extra$M) - cbind(mu)
- for(ii in 1:M) all.deviances[ii] = 2 * sum(w *
- (devy[,ii] - devi[,ii]) *
- Wr1(myresid[,ii], w=extra$w.aml[ii]))
- }
- if (is.logical(extra$individual) && extra$individual)
- all.deviances else sum(all.deviances)
+ devy = cbind(-log(y) - 1)
+ devi = cbind(-log(mu) - y / mu)
+ if (residuals) {
+ stop("not sure here")
+ return(sign(y - mu) * sqrt(2 * abs(devi) * w) *
+ matrix(extra$w,extra$n,extra$M))
+ } else {
+ all.deviances = numeric(M)
+ myresid = matrix(y,extra$n,extra$M) - cbind(mu)
+ for(ii in 1:M) all.deviances[ii] = 2 * sum(c(w) *
+ (devy[, ii] - devi[, ii]) *
+ Wr1(myresid[, ii], w=extra$w.aml[ii]))
+ }
+ if (is.logical(extra$individual) && extra$individual)
+ all.deviances else sum(all.deviances)
}
amlexponential <- function(w.aml = 1, parallel = FALSE, imethod = 1,
- digw = 4, link = "loge", earg = list())
+ digw = 4, link = "loge")
{
- if (!is.Numeric(w.aml, positive = TRUE))
- stop("'w.aml' must be a vector of positive values")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1, 2 or 3")
-
- if (mode(link)!= "character" && mode(link)!= "name")
- link = as.character(substitute(link))
-
- if (!is.list(earg)) earg = list()
-
- y.names = paste("w.aml = ", round(w.aml, digits = digw), sep = "")
- predictors.names = c(namesof(
- paste("expectile(", y.names,")", sep = ""), link, earg = earg))
- predictors.names = paste(predictors.names, collapse = ", ")
-
- new("vglmff",
- blurb = c("Exponential expectile regression by",
- " asymmetric maximum likelihood estimation\n\n",
- "Link: ", predictors.names),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
- }), list( .parallel = parallel ))),
- deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- amlexponential.deviance(mu = mu, y = y, w = w,
- residuals = residuals,
- eta = eta, extra = extra)
- },
- initialize = eval(substitute(expression({
- extra$w.aml = .w.aml
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0.0))
- stop("all responses must be positive")
- extra$M = M = length(extra$w.aml) # Recycle if necessary
- extra$n = n
- extra$y.names = y.names =
- paste("w.aml = ", round(extra$w.aml, digits = .digw), sep = "")
- extra$individual = FALSE
- predictors.names = c(namesof(
- paste("expectile(", y.names, ")", sep = ""),
- .link , earg = .earg , tag = FALSE))
+ if (!is.Numeric(w.aml, positive = TRUE))
+ stop("'w.aml' must be a vector of positive values")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1, 2 or 3")
- if (!length(etastart)) {
- mean.init = if ( .imethod == 1)
- rep(median(y), length = n) else
- if ( .imethod == 2)
- rep(weighted.mean(y, w), length = n) else {
- 1 / (y + 1)
- }
- etastart = matrix(theta2eta(mean.init, .link , earg = .earg),
- n, M)
- }
- }), list( .link = link, .earg = earg, .imethod = imethod,
- .digw = digw, .w.aml = w.aml ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- mu.ans = eta = as.matrix(eta)
- for(ii in 1:ncol(eta))
- mu.ans[,ii] = eta2theta(eta[,ii], .link , earg = .earg)
- dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
- mu.ans
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = rep(.link , length = M)
- names(misc$link) = extra$y.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- misc$parallel = .parallel
- misc$expected = TRUE
- extra$percentile = numeric(M)
- for(ii in 1:M)
- extra$percentile[ii] = 100 * weighted.mean(myresid[,ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
- extra$individual = TRUE
- extra$deviance = amlexponential.deviance(mu = mu, y = y, w = w,
- residuals = FALSE, eta = eta, extra = extra)
- names(extra$deviance) = extra$y.names
- }), list( .link = link, .earg = earg, .parallel = parallel ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, link = .link , earg = .earg)
- }, list( .link = link, .earg = earg ))),
- vfamily = c("amlexponential"),
- deriv = eval(substitute(expression({
- mymu = eta2theta(eta, .link , earg = .earg)
- bigy = matrix(y,extra$n,extra$M)
- dl.dmu = (bigy - mymu) / mymu^2
- dmu.deta = dtheta.deta(mymu, .link , earg = .earg)
- myresid = bigy - cbind(mymu)
- wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
- byrow = TRUE))
- w * wor1 * dl.dmu * dmu.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- ned2l.dmu2 = 1 / mymu^2
- wz = w * wor1 * ned2l.dmu2 * dmu.deta^2
- wz
- }), list( .link = link, .earg = earg ))))
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ y.names = paste("w.aml = ", round(w.aml, digits = digw), sep = "")
+ predictors.names <- c(namesof(
+ paste("expectile(", y.names,")", sep = ""), link, earg = earg))
+ predictors.names <- paste(predictors.names, collapse = ", ")
+
+
+ new("vglmff",
+ blurb = c("Exponential expectile regression by",
+ " asymmetric maximum likelihood estimation\n\n",
+ "Link: ", predictors.names),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M,1), x, .parallel, constraints)
+ }), list( .parallel = parallel ))),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ amlexponential.deviance(mu = mu, y = y, w = w,
+ residuals = residuals,
+ eta = eta, extra = extra)
+ },
+ initialize = eval(substitute(expression({
+ extra$w.aml = .w.aml
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ extra$M = M = length(extra$w.aml) # Recycle if necessary
+ extra$n = n
+ extra$y.names = y.names =
+ paste("w.aml = ", round(extra$w.aml, digits = .digw ), sep = "")
+ extra$individual = FALSE
+
+
+ predictors.names <- c(namesof(
+ paste("expectile(", y.names, ")", sep = ""),
+ .link , earg = .earg , tag = FALSE))
+
+ if (!length(etastart)) {
+ mean.init = if ( .imethod == 1)
+ rep(median(y), length = n) else
+ if ( .imethod == 2)
+ rep(weighted.mean(y, w), length = n) else {
+ 1 / (y + 1)
+ }
+ etastart = matrix(theta2eta(mean.init, .link , earg = .earg ),
+ n, M)
+ }
+ }), list( .link = link, .earg = earg, .imethod = imethod,
+ .digw = digw, .w.aml = w.aml ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ mu.ans = eta = as.matrix(eta)
+ for(ii in 1:ncol(eta))
+ mu.ans[, ii] = eta2theta(eta[, ii], .link , earg = .earg )
+ dimnames(mu.ans) = list(dimnames(eta)[[1]], extra$y.names)
+ mu.ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$multipleResponses <- TRUE
+ misc$expected = TRUE
+ misc$parallel = .parallel
+
+ misc$link = rep(.link , length = M)
+ names(misc$link) = extra$y.names
+
+ misc$earg = vector("list", M)
+ for (ilocal in 1:M)
+ misc$earg[[ilocal]] <- list(theta = NULL)
+ names(misc$earg) = names(misc$link)
+
+
+ extra$percentile = numeric(M)
+ for(ii in 1:M)
+ extra$percentile[ii] = 100 * weighted.mean(myresid[, ii] <= 0, w)
+ names(extra$percentile) = names(misc$link)
+
+ extra$individual = TRUE
+ extra$deviance =
+ amlexponential.deviance(mu = mu, y = y, w = w,
+ residuals = FALSE, eta = eta, extra = extra)
+ names(extra$deviance) = extra$y.names
+ }), list( .link = link, .earg = earg, .parallel = parallel ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, link = .link , earg = .earg )
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("amlexponential"),
+ deriv = eval(substitute(expression({
+ mymu = eta2theta(eta, .link , earg = .earg )
+ bigy = matrix(y,extra$n,extra$M)
+ dl.dmu = (bigy - mymu) / mymu^2
+
+ dmu.deta = dtheta.deta(mymu, .link , earg = .earg )
+ myresid = bigy - cbind(mymu)
+ wor1 = Wr2(myresid, w = matrix(extra$w.aml, extra$n, extra$M,
+ byrow = TRUE))
+ c(w) * wor1 * dl.dmu * dmu.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ ned2l.dmu2 = 1 / mymu^2
+ wz = c(w) * wor1 * ned2l.dmu2 * dmu.deta^2
+ wz
+ }), list( .link = link, .earg = earg ))))
}
@@ -1680,75 +1790,82 @@ amlexponential.deviance = function(mu, y, w, residuals = FALSE,
-rho1check = function(u, tau = 0.5)
- u * (tau - (u <= 0))
+rho1check <- function(u, tau = 0.5)
+ u * (tau - (u <= 0))
-dalap = function(x, location = 0, scale = 1, tau = 0.5,
+
+
+
+dalap <- function(x, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau)), log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
+
- NN = max(length(x), length(location), length(scale), length(kappa))
- location = rep(location, length.out = NN);
- scale = rep(scale, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
+ NN = max(length(x), length(location), length(scale), length(kappa))
+ location = rep(location, length.out = NN);
+ scale = rep(scale, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
- logconst = 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
- exponent = -(sqrt(2) / scale) * abs(x - location) *
- ifelse(x >= location, kappa, 1/kappa)
+ logconst = 0.5 * log(2) - log(scale) + log(kappa) - log1p(kappa^2)
+ exponent = -(sqrt(2) / scale) * abs(x - location) *
+ ifelse(x >= location, kappa, 1/kappa)
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logconst[!indexTF] = NaN
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logconst[!indexTF] = NaN
- if (log.arg) logconst + exponent else exp(logconst + exponent)
+ if (log.arg) logconst + exponent else exp(logconst + exponent)
}
-ralap = function(n, location = 0, scale = 1, tau = 0.5,
+ralap <- function(n, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
-
- location = rep(location, length.out = use.n);
- scale = rep(scale, length.out = use.n)
- tau = rep(tau, length.out = use.n);
- kappa = rep(kappa, length.out = use.n);
- ans = location + scale *
- log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
- ans
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+
+ location = rep(location, length.out = use.n);
+ scale = rep(scale, length.out = use.n)
+ tau = rep(tau, length.out = use.n);
+ kappa = rep(kappa, length.out = use.n);
+ ans = location + scale *
+ log(runif(use.n)^kappa / runif(use.n)^(1/kappa)) / sqrt(2)
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
}
-palap = function(q, location = 0, scale = 1, tau = 0.5,
+palap <- function(q, location = 0, scale = 1, tau = 0.5,
kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location), length(scale), length(kappa))
- location = rep(location, length.out = NN);
- scale = rep(scale, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- exponent = -(sqrt(2) / scale) * abs(q - location) *
- ifelse(q >= location, kappa, 1/kappa)
- temp5 = exp(exponent) / (1 + kappa^2)
- ans = 1 - temp5
- index1 = (q < location)
- ans[index1] = (kappa[index1])^2 * temp5[index1]
-
- indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
- ans
+ NN = max(length(q), length(location), length(scale), length(kappa))
+ location = rep(location, length.out = NN);
+ scale = rep(scale, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ q = rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
+
+ exponent = -(sqrt(2) / scale) * abs(q - location) *
+ ifelse(q >= location, kappa, 1/kappa)
+ temp5 = exp(exponent) / (1 + kappa^2)
+ ans = 1 - temp5
+ index1 = (q < location)
+ ans[index1] = (kappa[index1])^2 * temp5[index1]
+
+ indexTF = (scale > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
}
-qalap = function(p, location = 0, scale = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau))) {
+qalap <- function(p, location = 0, scale = 1, tau = 0.5,
+ kappa = sqrt(tau / (1 - tau))) {
NN = max(length(p), length(location), length(scale), length(kappa))
location = rep(location, length.out = NN);
scale = rep(scale, length.out = NN)
@@ -1777,447 +1894,263 @@ qalap = function(p, location = 0, scale = 1, tau = 0.5,
- if (FALSE)
-dqregal = function(x, tau = 0.5, location = 0, scale = 1) {
- if (!is.Numeric(scale, positive = TRUE))
- stop("'scale' must be positive")
- if (!is.Numeric(tau, positive = TRUE) ||
- max(tau) >= 1)
- stop("argument 'tau' must have values in (0,1)")
- const = tau * (1-tau) / scale
- const * exp(-rho1check((x-location)/scale, tau = tau))
+
+
+rloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau))) {
+ use.n = if ((length.n <- length(n)) > 1) length.n else
+ if (!is.Numeric(n, integer.valued = TRUE,
+ allowable.length = 1, positive = TRUE))
+ stop("bad input for argument 'n'") else n
+ location.ald = rep(location.ald, length.out = use.n);
+ scale.ald = rep(scale.ald, length.out = use.n)
+ tau = rep(tau, length.out = use.n);
+ kappa = rep(kappa, length.out = use.n);
+ ans = exp(location.ald) *
+ (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
}
+dloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau)), log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- if (FALSE)
-rqregal = function(n, tau = 0.5, location = 0, scale = 1) {
- if (!is.Numeric(n, positive = TRUE, integer.valued = TRUE,
- allowable.length = 1))
- stop("bad input for argument 'n'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("'scale' must be positive")
- if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
- stop("'tau' must have values in (0,1)")
- location = rep(location, length.out = n);
- scale = rep(scale, length.out = n)
- r = runif(n)
- location - sign(r-tau) * scale * log(2*ifelse(r < tau, r, 1-r))
+
+ NN = max(length(x), length(location.ald),
+ length(scale.ald), length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
+
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+ exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) *
+ (log(x) - location.ald)
+ logdensity = -location.ald + log(Alpha) + log(Beta) -
+ log(Alpha + Beta) + exponent
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ if (log.arg) logdensity else exp(logdensity)
}
+qloglap <- function(p, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ NN = max(length(p), length(location.ald), length(scale.ald),
+ length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ p = rep(p, length.out = NN)
+ tau = rep(tau, length.out = NN)
- if (FALSE)
-pqregal = function(q, tau = 0.5, location = 0, scale = 1) {
- if (!all(scale == 1))
- stop("currently can only handle scale == 1")
- if (!is.Numeric(q))
- stop("bad input for argument 'q'")
- if (!is.Numeric(location))
- stop("bad input for argument 'location'")
- if (!is.Numeric(scale, positive = TRUE))
- stop("'scale' must be positive")
- if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
- stop("argument 'tau' must have values in (0,1)")
-
- N = max(length(q), length(tau), length(location), length(scale))
- location = rep(location, length.out = N);
- scale = rep(scale, length.out = N)
- tau = rep(tau, length.out = N);
- q = rep(q, length.out = N)
-
- ans = tau * exp(-(location - q) * (1 - tau))
- index1 = (q > location)
- ans[index1] = (1 - (1-tau) * exp(-tau * (q - location)))[index1]
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+
+ temp9 = Alpha + Beta
+ ans = Delta * (p * temp9 / Alpha)^(1/Beta)
+ index1 = (p > Alpha / temp9)
+ ans[index1] = (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
+ ans[p == 0] = 0
+ ans[p == 1] = Inf
+
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
+ (p >= 0) & (p <= 1) # &
+ ans[!indexTF] = NaN
ans
}
- if (FALSE)
-qregal = function(tau = c(0.25, 0.5, 0.75),
- llocation = "identity",
- elocation = list(),
- lscale = "loge", escale = list(),
- ilocation = NULL,
- parallel = FALSE, imethod = 1, digt = 4) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
-
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(tau, positive = TRUE) || max(tau) >= 1)
- stop("bad input for argument 'tau'")
-
- if (!is.list(elocation)) elocation = list()
-
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.list(escale)) escale = list()
-
- new("vglmff",
- blurb = c("Quantile Regression via an ",
- "Asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("location", llocation, earg = elocation)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1,M,1), x, .parallel, constraints)
- }), list( .parallel = parallel ))),
- initialize = eval(substitute(expression({
- extra$tau = .tau
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$M = M = 1 + length(extra$tau)
- extra$n = n
- extra$y.names = y.names =
- paste("tau = ", round(extra$tau, digits = .digt ), sep = "")
- extra$individual = FALSE
- predictors.names = c(
- namesof("scale", .lscale, earg = .escale , tag = FALSE),
- namesof(paste("quantile(", y.names, ")", sep = ""),
- link = .llocat , earg = .elocat , tag = FALSE))
-
- if (!length(etastart)) {
- if ( .imethod == 1) {
- locat.init = median(y)
- } else {
- locat.init = y
- }
- locat.init = if (length(.ilocat)) {
- matrix( .ilocat, n, M-1, byrow = TRUE)
- } else {
- rep(locat.init, length.out = n)
- }
- scale.init = rep(1.0, length.out = n)
- etastart = cbind(
- theta2eta(scale.init, .lscale, earg = .escale),
- matrix(
- theta2eta(locat.init, .llocat, earg = .elocat), n, M-1))
- }
- }), list( .imethod = imethod, .tau = tau, .digt = digt,
- .elocat = elocation, .escale = escale,
- .llocat = llocation, .lscale = lscale,
- .ilocat = ilocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta = as.matrix(eta)
- xi.ans = matrix(0, nrow(eta), ncol(eta)-1)
- for(ii in 1:(ncol(eta)-1))
- xi.ans[,ii] = eta2theta(eta[,ii+1], .llocat, earg = .elocat)
- dimnames(xi.ans) = list(dimnames(eta)[[1]], extra$y.names)
- xi.ans
- }, list( .elocat = elocation, .llocat = llocation, .tau = tau,
- .escale = escale, .lscale = lscale ))),
- last = eval(substitute(expression({
- misc$link = rep( .llocat, length = M)
- names(misc$link) = extra$y.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
-
- extra$percentile = numeric(M)
- for(ii in 1:M)
- extra$percentile[ii] = 100 *
- weighted.mean(ymat[,ii] - mu[,ii] <= 0, w)
- names(extra$percentile) = names(misc$link)
-
- misc$expected = TRUE
- misc$RegCondOK = FALSE # Save this for later
- misc$tau = .tau
- }), list( .elocat = elocation, .llocat = llocation, .tau = tau,
- .escale = escale, .lscale = lscale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE,eta, extra = NULL) {
- locmat = eta2theta(eta[, -1, drop = FALSE],
- .llocat, earg = .elocat)
- scalemat = matrix(eta2theta(eta[,1,drop = FALSE], .lscale,
- earg = .escale), nrow = extra$n, ncol = extra$M - 1)
- taumat = matrix(extra$tau, nrow = extra$n, ncol = extra$M - 1, byrow = TRUE)
- ymat = matrix(y, nrow = extra$n, ncol = extra$M - 1)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-log(scalemat) + log(taumat) + log1p(-taumat) -
- rho1check((ymat-locmat)/scalemat, tau = taumat)))
- }, list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale, .tau = tau ))),
- vfamily = c("qregal"),
- deriv = eval(substitute(expression({
- ymat = matrix(y, nrow = extra$n, ncol = extra$M - 1)
- taumat = matrix(extra$tau, nrow = extra$n, ncol = extra$M - 1,
- byrow = TRUE)
- scalemat = matrix(eta2theta(eta[,1,drop = FALSE], .lscale,
- earg = .escale),
- nrow = extra$n, ncol = extra$M - 1)
- locmat = eta2theta(eta[,-1,drop = FALSE], .llocat, earg = .elocat)
-
- dl.dlocation = taumat / scalemat
- index1 = (ymat < locmat)
- dl.dlocation[index1] = ((taumat - 1) / scalemat)[index1]
-
- dlocation.deta = dtheta.deta(locmat, .llocat, earg = .elocat)
- dscale.deta = dtheta.deta(scalemat, .lscale, earg = .escale)
-
- c(w) * cbind(dl.dlocation * dlocation.deta)
- }), list( .tau = tau, .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale ))),
- weight = eval(substitute(expression({
- wz = matrix(0, nrow = n, M) # Diagonal
- ed2l.dlocation2 = taumat * (1 - taumat) / scalemat^2
- ed2l.dscale2 = 2 * (3*taumat^2 - 3*taumat+1) / (scalemat^2 *
- taumat * (1-taumat))
- wz[,iam(1,1,M)] = ed2l.dscale2 * dscale.deta^2
- wz[,-1] = ed2l.dlocation2 * dlocation.deta^2
- c(w) * wz
- }), list( .tau = tau, .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale ))))
-}
+ploglap <- function(q, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ q = rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN)
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = exp(location.ald)
+ temp9 = Alpha + Beta
+ ans = (Alpha / temp9) * (q / Delta)^(Beta)
+ ans[q <= 0] = 0
+ index1 = (q >= Delta)
+ ans[index1] = (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
-rloglap = function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau))) {
- use.n = if ((length.n <- length(n)) > 1) length.n else
- if (!is.Numeric(n, integer.valued = TRUE,
- allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'n'") else n
- location.ald = rep(location.ald, length.out = use.n);
- scale.ald= rep(scale.ald, length.out = use.n)
- tau = rep(tau, length.out = use.n);
- kappa = rep(kappa, length.out = use.n);
- ans = exp(location.ald) *
- (runif(use.n)^kappa / runif(use.n)^(1/kappa))^(scale.ald / sqrt(2))
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
- ans
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ ans[!indexTF] = NaN
+ ans
}
-dloglap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
- NN = max(length(x), length(location.ald),
- length(scale.ald), length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
- exponent = ifelse(x >= Delta, -(Alpha+1), (Beta-1)) *
- (log(x) - location.ald)
- logdensity = -location.ald + log(Alpha) + log(Beta) -
- log(Alpha + Beta) + exponent
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- if (log.arg) logdensity else exp(logdensity)
+rlogitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau))) {
+ logit(ralap(n = n, location = location.ald, scale = scale.ald,
+ tau = tau, kappa = kappa),
+ inverse = TRUE) # earg = earg
}
-qloglap = function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(p), length(location.ald), length(scale.ald),
- length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- p = rep(p, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
-
- temp9 = Alpha + Beta
- ans = Delta * (p * temp9 / Alpha)^(1/Beta)
- index1 = (p > Alpha / temp9)
- ans[index1] = (Delta * ((1-p) * temp9 / Beta)^(-1/Alpha))[index1]
- ans[p == 0] = 0
- ans[p == 1] = Inf
-
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0)
- (p >= 0) & (p <= 1) # &
- ans[!indexTF] = NaN
- ans
-}
+dlogitlap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau)), log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
-ploglap = function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau))) {
- NN = max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q = rep(q, length.out = NN)
- tau = rep(tau, length.out = NN)
+ NN = max(length(x), length(location.ald),
+ length(scale.ald), length(kappa))
+ location = rep(location.ald, length.out = NN);
+ scale = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = exp(location.ald)
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = logit(location.ald, inverse = TRUE) # earg = earg
+
+ exponent = ifelse(x >= Delta, -Alpha, Beta) *
+ (logit(x) - # earg = earg
+ location.ald)
+ logdensity = log(Alpha) + log(Beta) - log(Alpha + Beta) -
+ log(x) - log1p(-x) + exponent
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+ if (log.arg) logdensity else exp(logdensity)
+}
- temp9 = Alpha + Beta
- ans = (Alpha / temp9) * (q / Delta)^(Beta)
- ans[q <= 0] = 0
- index1 = (q >= Delta)
- ans[index1] = (1 - (Beta/temp9) * (Delta/q)^(Alpha))[index1]
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- ans[!indexTF] = NaN
- ans
+qlogitlap <- function(p, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ qqq = qalap(p = p, location = location.ald, scale = scale.ald,
+ tau = tau, kappa = kappa)
+ ans = logit(qqq, inverse = TRUE) # earg = earg
+ ans[(p < 0) | (p > 1)] = NaN
+ ans[p == 0] = 0
+ ans[p == 1] = 1
+ ans
}
+plogitlap <- function(q, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, length.out = NN);
+ scale.ald = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); q = rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
-rlogitlap = function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), earg = list()) {
- logit(ralap(n = n, location = location.ald, scale = scale.ald,
- tau = tau, kappa = kappa), inverse = TRUE, earg = earg)
+ indexTF = (q > 0) & (q < 1)
+ qqq = logit(q[indexTF]) # earg = earg
+ ans = q
+ ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
+ scale = scale.ald[indexTF],
+ tau = tau[indexTF], kappa = kappa[indexTF])
+ ans[q >= 1] = 1
+ ans[q <= 0] = 0
+ ans
}
-dlogitlap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE,
- earg = list()) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
- NN = max(length(x), length(location.ald),
- length(scale.ald), length(kappa))
- location = rep(location.ald, length.out = NN);
- scale = rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = logit(location.ald, inverse = TRUE, earg = earg)
+rprobitlap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau))) {
- exponent = ifelse(x >= Delta, -Alpha, Beta) *
- (logit(x, earg = earg) - location.ald)
- logdensity = log(Alpha) + log(Beta) - log(Alpha + Beta) -
- log(x) - log1p(-x) + exponent
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
- if (log.arg) logdensity else exp(logdensity)
-}
-qlogitlap = function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
- qqq = qalap(p=p, location = location.ald, scale = scale.ald,
- tau = tau, kappa = kappa)
- ans = logit(qqq, inverse = TRUE, earg = earg)
- ans[(p < 0) | (p > 1)] = NaN
- ans[p == 0] = 0
- ans[p == 1] = 1
- ans
+ probit(ralap(n = n, location = location.ald, scale = scale.ald,
+ tau = tau, kappa = kappa),
+ inverse = TRUE)
}
-
-plogitlap = function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
- NN = max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald= rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN); q= rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- indexTF = (q > 0) & (q < 1)
- qqq = logit(q[indexTF], earg = earg)
- ans = q
- ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
- scale = scale.ald[indexTF],
- tau = tau[indexTF], kappa = kappa[indexTF])
- ans[q >= 1] = 1
- ans[q <= 0] = 0
- ans
-}
-
+dprobitlap <-
+ function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau)), log = FALSE,
+ meth2 = TRUE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
-rprobitlap = function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), earg = list()) {
- probit(ralap(n = n, location = location.ald, scale = scale.ald,
- tau = tau, kappa = kappa), inverse = TRUE, earg = earg)
-}
+ NN = max(length(x), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, length.out = NN);
+ scale.ald = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
-dprobitlap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE,
- earg = list(), meth2 = TRUE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
-
- NN = max(length(x), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald= rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN); x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- logdensity = x * NaN
- index1 = (x > 0) & (x < 1)
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- if (meth2) {
- dx.dy = x
- use.x = probit(x[index1], earg = earg)
- logdensity[index1] =
- dalap(x = use.x, location = location.ald[index1],
- scale = scale.ald[index1], tau = tau[index1],
- kappa = kappa[index1], log = TRUE)
- } else {
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = pnorm(location.ald)
- use.x = qnorm(x) # qnorm(x[index1])
- log.dy.dw = dnorm(use.x, log = TRUE)
+ logdensity = x * NaN
+ index1 = (x > 0) & (x < 1)
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ if (meth2) {
+ dx.dy = x
+ use.x = probit(x[index1]) # earg = earg
+ logdensity[index1] =
+ dalap(x = use.x, location = location.ald[index1],
+ scale = scale.ald[index1], tau = tau[index1],
+ kappa = kappa[index1], log = TRUE)
+ } else {
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = pnorm(location.ald)
+ use.x = qnorm(x) # qnorm(x[index1])
+ log.dy.dw = dnorm(use.x, log = TRUE)
- exponent = ifelse(x >= Delta, -Alpha, Beta) *
- (use.x - location.ald) - log.dy.dw
+ exponent = ifelse(x >= Delta, -Alpha, Beta) *
+ (use.x - location.ald) - log.dy.dw
- logdensity[index1] = (log(Alpha) + log(Beta) -
- log(Alpha + Beta) + exponent)[index1]
- }
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
-
- if (meth2) {
- dx.dy[index1] = probit(x[index1], earg = earg,
- inverse = FALSE, deriv = 1)
- dx.dy[!index1] = 0
- dx.dy[!indexTF] = NaN
- if (log.arg) logdensity - log(abs(dx.dy)) else
- exp(logdensity) / abs(dx.dy)
- } else {
- if (log.arg) logdensity else exp(logdensity)
- }
+ logdensity[index1] = (log(Alpha) + log(Beta) -
+ log(Alpha + Beta) + exponent)[index1]
+ }
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+
+ if (meth2) {
+ dx.dy[index1] = probit(x[index1], # earg = earg,
+ inverse = FALSE, deriv = 1)
+ dx.dy[!index1] = 0
+ dx.dy[!indexTF] = NaN
+ if (log.arg) logdensity - log(abs(dx.dy)) else
+ exp(logdensity) / abs(dx.dy)
+ } else {
+ if (log.arg) logdensity else exp(logdensity)
+ }
}
-qprobitlap = function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
- qqq = qalap(p=p, location = location.ald, scale = scale.ald,
+qprobitlap <- function(p, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ qqq = qalap(p = p, location = location.ald, scale = scale.ald,
tau = tau, kappa = kappa)
- ans = probit(qqq, inverse = TRUE, earg = earg)
+ ans = probit(qqq, inverse = TRUE) # , earg = earg
ans[(p < 0) | (p > 1)] = NaN
ans[p == 0] = 0
ans[p == 1] = 1
@@ -2226,118 +2159,120 @@ qprobitlap = function(p, location.ald = 0, scale.ald = 1,
-pprobitlap = function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
- NN = max(length(q), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald= rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- q= rep(q, length.out = NN)
- tau = rep(tau, length.out = NN);
-
- indexTF = (q > 0) & (q < 1)
- qqq = probit(q[indexTF], earg = earg)
- ans = q
- ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
- scale = scale.ald[indexTF],
- tau = tau[indexTF], kappa = kappa[indexTF])
- ans[q >= 1] = 1
- ans[q <= 0] = 0
- ans
+pprobitlap <- function(q, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ NN = max(length(q), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, length.out = NN);
+ scale.ald = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ q = rep(q, length.out = NN)
+ tau = rep(tau, length.out = NN);
+
+ indexTF = (q > 0) & (q < 1)
+ qqq = probit(q[indexTF]) # earg = earg
+ ans = q
+ ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
+ scale = scale.ald[indexTF],
+ tau = tau[indexTF], kappa = kappa[indexTF])
+ ans[q >= 1] = 1
+ ans[q <= 0] = 0
+ ans
}
-rclogloglap = function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), earg = list()) {
+rclogloglap <- function(n, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau))) {
cloglog(ralap(n = n, location = location.ald, scale = scale.ald,
- tau = tau, kappa = kappa), inverse = TRUE, earg = earg)
+ tau = tau, kappa = kappa), # earg = earg,
+ inverse = TRUE)
}
-dclogloglap = function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
- kappa = sqrt(tau/(1-tau)), log = FALSE,
- earg = list(), meth2 = TRUE) {
- if (!is.logical(log.arg <- log)) stop("bad input for argument 'log'")
- rm(log)
-
- NN = max(length(x), length(location.ald), length(scale.ald),
- length(kappa))
- location.ald = rep(location.ald, length.out = NN);
- scale.ald= rep(scale.ald, length.out = NN)
- kappa = rep(kappa, length.out = NN);
- x = rep(x, length.out = NN)
- tau = rep(tau, length.out = NN)
-
- logdensity = x * NaN
- index1 = (x > 0) & (x < 1)
- indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
- if (meth2) {
- dx.dy = x
- use.w = cloglog(x[index1], earg = earg)
- logdensity[index1] =
- dalap(x = use.w, location = location.ald[index1],
- scale = scale.ald[index1],
- tau = tau[index1],
- kappa = kappa[index1], log = TRUE)
+dclogloglap <- function(x, location.ald = 0, scale.ald = 1, tau = 0.5,
+ kappa = sqrt(tau/(1-tau)), log = FALSE,
+ meth2 = TRUE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+
- } else {
- Alpha = sqrt(2) * kappa / scale.ald
- Beta = sqrt(2) / (scale.ald * kappa)
- Delta = cloglog(location.ald, inverse = TRUE)
-
- exponent = ifelse(x >= Delta, -(Alpha+1), Beta-1) * log(-log1p(-x)) +
- ifelse(x >= Delta, Alpha, -Beta) * location.ald
- logdensity[index1] = (log(Alpha) + log(Beta) -
- log(Alpha + Beta) - log1p(-x) + exponent)[index1]
- }
- logdensity[!indexTF] = NaN
- logdensity[x < 0 & indexTF] = -Inf
- logdensity[x > 1 & indexTF] = -Inf
-
- if (meth2) {
- dx.dy[index1] = cloglog(x[index1], earg = earg,
- inverse = FALSE, deriv = 1)
- dx.dy[!index1] = 0
- dx.dy[!indexTF] = NaN
- if (log.arg) logdensity - log(abs(dx.dy)) else
- exp(logdensity) / abs(dx.dy)
- } else {
- if (log.arg) logdensity else exp(logdensity)
- }
+ NN = max(length(x), length(location.ald), length(scale.ald),
+ length(kappa))
+ location.ald = rep(location.ald, length.out = NN);
+ scale.ald = rep(scale.ald, length.out = NN)
+ kappa = rep(kappa, length.out = NN);
+ x = rep(x, length.out = NN)
+ tau = rep(tau, length.out = NN)
+
+ logdensity = x * NaN
+ index1 = (x > 0) & (x < 1)
+ indexTF = (scale.ald > 0) & (tau > 0) & (tau < 1) & (kappa > 0) # &
+ if (meth2) {
+ dx.dy = x
+ use.w = cloglog(x[index1]) # earg = earg
+ logdensity[index1] =
+ dalap(x = use.w, location = location.ald[index1],
+ scale = scale.ald[index1],
+ tau = tau[index1],
+ kappa = kappa[index1], log = TRUE)
+
+ } else {
+ Alpha = sqrt(2) * kappa / scale.ald
+ Beta = sqrt(2) / (scale.ald * kappa)
+ Delta = cloglog(location.ald, inverse = TRUE)
+
+ exponent = ifelse(x >= Delta, -(Alpha+1), Beta-1) * log(-log1p(-x)) +
+ ifelse(x >= Delta, Alpha, -Beta) * location.ald
+ logdensity[index1] = (log(Alpha) + log(Beta) -
+ log(Alpha + Beta) - log1p(-x) + exponent)[index1]
+ }
+ logdensity[!indexTF] = NaN
+ logdensity[x < 0 & indexTF] = -Inf
+ logdensity[x > 1 & indexTF] = -Inf
+
+ if (meth2) {
+ dx.dy[index1] = cloglog(x[index1], # earg = earg,
+ inverse = FALSE, deriv = 1)
+ dx.dy[!index1] = 0
+ dx.dy[!indexTF] = NaN
+ if (log.arg) logdensity - log(abs(dx.dy)) else
+ exp(logdensity) / abs(dx.dy)
+ } else {
+ if (log.arg) logdensity else exp(logdensity)
+ }
}
-qclogloglap = function(p, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
- qqq = qalap(p=p, location = location.ald, scale = scale.ald,
- tau = tau, kappa = kappa)
- ans = cloglog(qqq, inverse = TRUE, earg = earg)
- ans[(p < 0) | (p > 1)] = NaN
- ans[p == 0] = 0
- ans[p == 1] = 1
- ans
+
+qclogloglap <- function(p, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
+ qqq = qalap(p = p, location = location.ald, scale = scale.ald,
+ tau = tau, kappa = kappa)
+ ans = cloglog(qqq, inverse = TRUE) # , earg = earg
+ ans[(p < 0) | (p > 1)] = NaN
+ ans[p == 0] = 0
+ ans[p == 1] = 1
+ ans
}
-pclogloglap = function(q, location.ald = 0, scale.ald = 1,
- tau = 0.5, kappa = sqrt(tau/(1-tau)),
- earg = list()) {
+pclogloglap <- function(q, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau))) {
NN = max(length(q), length(location.ald), length(scale.ald),
length(kappa))
location.ald = rep(location.ald, length.out = NN);
- scale.ald= rep(scale.ald, length.out = NN)
+ scale.ald = rep(scale.ald, length.out = NN)
kappa = rep(kappa, length.out = NN);
- q= rep(q, length.out = NN)
+ q = rep(q, length.out = NN)
tau = rep(tau, length.out = NN);
indexTF = (q > 0) & (q < 1)
- qqq = cloglog(q[indexTF], earg = earg)
+ qqq = cloglog(q[indexTF]) # earg = earg
ans = q
ans[indexTF] = palap(q = qqq, location = location.ald[indexTF],
scale = scale.ald[indexTF],
@@ -2358,27 +2293,35 @@ pclogloglap = function(q, location.ald = 0, scale.ald = 1,
alaplace2.control <- function(maxit = 100, ...)
{
- list(maxit = maxit)
+ list(maxit = maxit)
}
alaplace2 <- function(tau = NULL,
llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
ilocation = NULL, iscale = NULL,
kappa = sqrt(tau / (1-tau)),
shrinkage.init = 0.95,
parallelLocation = FALSE, digt = 4,
- sameScale = TRUE,
+ eq.scale = TRUE,
dfmu.init = 3,
intparloc = FALSE,
imethod = 1,
zero = -2) {
- llocat <- llocation
- elocat <- elocation
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
ilocat <- ilocation
+
+
if (!is.Numeric(kappa, positive = TRUE))
stop("bad input for argument 'kappa'")
if (!is.Numeric(imethod, allowable.length = 1,
@@ -2400,18 +2343,13 @@ alaplace2.control <- function(maxit = 100, ...)
if (length(tau) &&
max(abs(kappa - sqrt(tau / (1 - tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocat) != "character" && mode(llocat) != "name")
- llocat = as.character(substitute(llocat))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.list(elocat)) elocat = list()
- if (!is.list(escale)) escale = list()
+
if (!is.logical(intparloc) || length(intparloc) != 1)
stop("argument 'intparloc' must be a single logical")
- if (!is.logical(sameScale) || length(sameScale) != 1)
- stop("argument 'sameScale' must be a single logical")
+ if (!is.logical(eq.scale) || length(eq.scale) != 1)
+ stop("argument 'eq.scale' must be a single logical")
if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
stop("argument 'parallelLocation' must be a single logical")
fittedMean = FALSE
@@ -2441,7 +2379,7 @@ alaplace2.control <- function(maxit = 100, ...)
onemat = matrix(1, Mdiv2, 1)
locatHmat1 = kronecker(if ( .intparloc ) onemat else
diag(Mdiv2), rbind(1, 0))
- scaleHmat1 = kronecker(if ( .sameScale ) onemat else
+ scaleHmat1 = kronecker(if ( .eq.scale ) onemat else
diag(Mdiv2), rbind(0, 1))
locatHmatk = kronecker(if ( .PARALLEL ) onemat else
@@ -2454,7 +2392,7 @@ alaplace2.control <- function(maxit = 100, ...)
intercept = FALSE)
if (names(constraints)[1] == "(Intercept)") {
- constraints[["(Intercept)"]] = cbind(locatHmat1, scaleHmat1)
+ constraints[["(Intercept)"]] = cbind(locatHmat1, scaleHmat1)
}
@@ -2469,13 +2407,13 @@ alaplace2.control <- function(maxit = 100, ...)
if (length(orig.constraints)) {
if (!identical(orig.constraints, constraints)) {
warning("the inputted 'constraints' argument does not match with ",
- "the 'zero', 'parallel', 'sameScale' arguments. ",
+ "the 'zero', 'parallel', 'eq.scale' arguments. ",
"Using the inputted 'constraints'.")
constraints = orig.constraints
}
}
- }), list( .sameScale = sameScale,
+ }), list( .eq.scale = eq.scale,
.parallelLocation = parallelLocation,
.intparloc = intparloc,
.zero = zero ))),
@@ -2485,13 +2423,25 @@ alaplace2.control <- function(maxit = 100, ...)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
extra$Musual <- Musual <- 2
- y <- cbind(y)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = if (length( .kappa ) > 1) 1 else Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
extra$ncoly <- ncoly <- ncol(y)
if ((ncoly > 1) && (length( .kappa ) > 1))
stop("response must be a vector if 'kappa' or 'tau' ",
"has a length greater than one")
+
extra$kappa = .kappa
extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
@@ -2515,8 +2465,8 @@ alaplace2.control <- function(maxit = 100, ...)
mynames1 <- paste("location", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
mynames2 <- paste("scale", if (Mdiv2 > 1) 1:Mdiv2 else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .llocat, earg = .elocat, tag = FALSE),
- namesof(mynames2, .lscale, earg = .escale, tag = FALSE))
+ c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE),
+ namesof(mynames2, .lscale , earg = .escale, tag = FALSE))
predictors.names <-
predictors.names[interleave.VGAM(M, M = Musual)]
@@ -2528,38 +2478,41 @@ alaplace2.control <- function(maxit = 100, ...)
for(jay in 1:Mdiv2) {
y.use <- if (ncoly > 1) y[, jay] else y
if ( .imethod == 1) {
- locat.init[, jay] = weighted.mean(y.use, w)
+ locat.init[, jay] = weighted.mean(y.use, w[, jay])
scale.init[, jay] = sqrt(var(y.use) / 2)
} else if ( .imethod == 2) {
locat.init[, jay] = median(y.use)
- scale.init[, jay] =
- sqrt(sum(w * abs(y - median(y.use))) / (sum(w) * 2))
+ scale.init[, jay] = sqrt(sum(c(w[, jay]) *
+ abs(y - median(y.use))) / (sum(w[, jay]) * 2))
} else if ( .imethod == 3) {
Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = y.use, w = w, df = .dfmu.init)
+ y = y.use, w = w[, jay],
+ df = .dfmu.init )
locat.init[, jay] = predict(Fit5, x = x[, min(ncol(x), 2)])$y
scale.init[, jay] =
- sqrt(sum(w * abs(y.use - median(y.use))) / (sum(w) * 2))
+ sqrt(sum(c(w[, jay]) *
+ abs(y.use - median(y.use))) / (sum(w[, jay]) * 2))
} else {
- use.this = weighted.mean(y.use, w)
+ use.this = weighted.mean(y.use, w[, jay])
locat.init[, jay] = (1 - .sinit) * y.use + .sinit * use.this
scale.init[, jay] =
- sqrt(sum(w * abs(y.use - median(y.use ))) / (sum(w) * 2))
+ sqrt(sum(c(w[, jay]) *
+ abs(y.use - median(y.use ))) / (sum(w[, jay]) * 2))
}
}
if (length( .ilocat )) {
- locat.init = matrix( .ilocat , n, Mdiv2, byrow = TRUE)
+ locat.init = matrix( .ilocat , n, Mdiv2, byrow = TRUE)
}
if (length( .iscale )) {
- scale.init = matrix( .iscale , n, Mdiv2, byrow = TRUE)
+ scale.init = matrix( .iscale , n, Mdiv2, byrow = TRUE)
}
etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
- theta2eta(scale.init, .lscale, earg = .escale))
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .imethod = imethod,
@@ -2571,13 +2524,13 @@ alaplace2.control <- function(maxit = 100, ...)
linkinv = eval(substitute(function(eta, extra = NULL) {
Mdiv2 = extra$Mdiv2
locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
- .llocat, earg = .elocat)
+ .llocat , earg = .elocat )
dimnames(locat) = list(dimnames(eta)[[1]], extra$y.names)
myans <- if ( .fittedMean ) {
kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2,
byrow = TRUE)
Scale = eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
- .lscale, earg = .escale)
+ .lscale , earg = .escale )
locat + Scale * (1/kappamat - kappamat)
} else {
locat
@@ -2599,13 +2552,14 @@ alaplace2.control <- function(maxit = 100, ...)
misc$earg = vector("list", M)
misc$Musual <- Musual
- names(misc$earg) = names(misc$link)
for(ii in 1:Mdiv2) {
misc$earg[[Musual * ii - 1]] = .elocat
misc$earg[[Musual * ii ]] = .escale
}
+ names(misc$earg) = names(misc$link)
+ misc$multipleResponses <- TRUE
misc$expected = TRUE
extra$kappa = misc$kappa = .kappa
extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
@@ -2616,7 +2570,8 @@ alaplace2.control <- function(maxit = 100, ...)
locat = as.matrix(locat)
for(ii in 1:Mdiv2) {
y.use <- if (ncoly > 1) y[, ii] else y
- extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii], w)
+ extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii],
+ w[, ii])
}
# if (ncoly > 1) names(misc$link) else zz:
names(extra$percentile) = y.names
@@ -2627,18 +2582,19 @@ alaplace2.control <- function(maxit = 100, ...)
.kappa = kappa ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Musual <- 2
Mdiv2 = extra$Mdiv2
ymat = matrix(y, extra$n, extra$Mdiv2)
kappamat = matrix(extra$kappa, extra$n, extra$Mdiv2, byrow = TRUE)
locat = eta2theta(eta[, 2 * (1:Mdiv2) - 1, drop = FALSE],
- .llocat, earg = .elocat)
+ .llocat , earg = .elocat )
Scale = eta2theta(eta[, 2 * (1:Mdiv2) , drop = FALSE],
- .lscale, earg = .escale)
+ .lscale , earg = .escale )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * dalap(x = c(ymat), location = c(locat),
+ sum(c(w) * dalap(x = c(ymat), location = c(locat),
scale = c(Scale), kappa = c(kappamat),
log = TRUE))
}
@@ -2647,13 +2603,14 @@ alaplace2.control <- function(maxit = 100, ...)
.kappa = kappa ))),
vfamily = c("alaplace2"),
deriv = eval(substitute(expression({
+ Musual <- 2
Mdiv2 = extra$Mdiv2
ymat = matrix(y, n, Mdiv2)
- locat = eta2theta(eta[, 2 * (1:(Mdiv2)) - 1, drop = FALSE],
- .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2 * (1:(Mdiv2)) , drop = FALSE],
- .lscale, earg = .escale)
+ locat = eta2theta(eta[, Musual * (1:(Mdiv2)) - 1, drop = FALSE],
+ .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, Musual * (1:(Mdiv2)) , drop = FALSE],
+ .lscale , earg = .escale )
kappamat = matrix(extra$kappa, n, Mdiv2, byrow = TRUE)
@@ -2662,8 +2619,8 @@ alaplace2.control <- function(maxit = 100, ...)
sign(ymat - locat) / Scale
dl.dscale = sqrt(2) * ifelse(ymat >= locat, kappamat, 1/kappamat) *
zedd / Scale - 1 / Scale
- dlocat.deta = dtheta.deta(locat, .llocat, earg = .elocat)
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
ans <- c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
@@ -2674,11 +2631,12 @@ alaplace2.control <- function(maxit = 100, ...)
.kappa = kappa ))),
weight = eval(substitute(expression({
wz <- matrix(as.numeric(NA), n, M)
+
d2l.dlocat2 = 2 / Scale^2
d2l.dscale2 = 1 / Scale^2
- wz[, 2*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
- wz[, 2*(1:Mdiv2) ] <- d2l.dscale2 * dscale.deta^2
+ wz[, Musual*(1:Mdiv2) - 1] <- d2l.dlocat2 * dlocat.deta^2
+ wz[, Musual*(1:Mdiv2) ] <- d2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .escale = escale, .lscale = lscale,
@@ -2704,51 +2662,65 @@ alaplace1.control <- function(maxit = 100, ...)
- alaplace1 = function(tau = NULL,
- llocation = "identity",
- elocation = list(),
- ilocation = NULL,
- kappa = sqrt(tau/(1-tau)),
- Scale.arg = 1,
- shrinkage.init = 0.95,
- parallelLocation = FALSE, digt = 4,
- dfmu.init = 3,
- intparloc = FALSE,
- imethod = 1) {
+ alaplace1 <- function(tau = NULL,
+ llocation = "identity",
+ ilocation = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ Scale.arg = 1,
+ shrinkage.init = 0.95,
+ parallelLocation = FALSE, digt = 4,
+ dfmu.init = 3,
+ intparloc = FALSE,
+ imethod = 1) {
- if (!is.Numeric(kappa, positive = TRUE))
- stop("bad input for argument 'kappa'")
- if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
- stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1, 2 or ... 4")
+ if (!is.Numeric(kappa, positive = TRUE))
+ stop("bad input for argument 'kappa'")
+ if (length(tau) &&
+ max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1, 2 or ... 4")
+
+
+ llocation <- llocation
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+ if (!is.Numeric(Scale.arg, positive = TRUE))
+ stop("bad input for argument 'Scale.arg'")
+
+
+ if (!is.logical(parallelLocation) ||
+ length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+
+
+
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
- if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
- stop("bad input for argument 'shrinkage.init'")
- if (!is.Numeric(Scale.arg, positive = TRUE))
- stop("bad input for argument 'Scale.arg'")
- if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
- stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
- if (!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument 'fittedMean'")
new("vglmff",
blurb = c("One-parameter asymmetric Laplace distribution\n\n",
"Links: ",
- namesof("location", llocation, earg = elocation),
+ namesof("location", llocat, earg = elocat),
"\n", "\n",
"Mean: location + scale * (1/kappa - kappa) / ",
"sqrt(2)", "\n",
@@ -2778,7 +2750,7 @@ alaplace1.control <- function(maxit = 100, ...)
if (length(orig.constraints)) {
if (!identical(orig.constraints, constraints)) {
warning("the inputted 'constraints' argument does not match with ",
- "the 'parallel', 'sameScale' arguments. ",
+ "the 'parallel', 'eq.scale' arguments. ",
"Using the inputted 'constraints'.")
constraints = orig.constraints
}
@@ -2794,9 +2766,22 @@ alaplace1.control <- function(maxit = 100, ...)
.tau = tau ))),
initialize = eval(substitute(expression({
extra$Musual <- Musual <- 1
- y <- cbind(y)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = if (length( .kappa ) > 1) 1 else Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
extra$ncoly <- ncoly <- ncol(y)
- if ((ncoly > 1) && (length( .kappa ) > 1 || length( .Scale.arg ) > 1))
+ if ((ncoly > 1) && (length( .kappa ) > 1 ||
+ length( .Scale.arg ) > 1))
stop("response must be a vector if 'kappa' or 'Scale.arg' ",
"has a length greater than one")
@@ -2827,7 +2812,7 @@ alaplace1.control <- function(maxit = 100, ...)
mynames1 <- paste("location", if (M > 1) 1:M else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .llocat, earg = .elocat, tag = FALSE))
+ c(namesof(mynames1, .llocat , earg = .elocat, tag = FALSE))
locat.init <- matrix(0, n, M)
@@ -2840,12 +2825,12 @@ alaplace1.control <- function(maxit = 100, ...)
} else if ( .imethod == 2) {
locat.init[, jay] = median(y.use)
} else if ( .imethod == 3) {
- Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
- y = y.use, w = w, df = .dfmu.init)
- locat.init[, jay] = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
+ Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)],
+ y = y.use, w = w, df = .dfmu.init)
+ locat.init[, jay] = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
} else {
- use.this = weighted.mean(y.use, w)
- locat.init[, jay] = (1- .sinit) * y.use + .sinit * use.this
+ use.this = weighted.mean(y.use, w)
+ locat.init[, jay] = (1- .sinit) * y.use + .sinit * use.this
}
@@ -2855,39 +2840,40 @@ alaplace1.control <- function(maxit = 100, ...)
if ( .llocat == "loge") locat.init = abs(locat.init)
etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat))
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
}
}
}), list( .imethod = imethod,
.dfmu.init = dfmu.init,
.sinit = shrinkage.init, .digt = digt,
- .elocat = elocation, .Scale.arg = Scale.arg,
- .llocat = llocation, .kappa = kappa,
- .ilocat = ilocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- if ( .fittedMean ) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- location = eta2theta(eta, .llocat, earg = .elocat)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location + Scale * (1/kappamat - kappamat)
- } else {
- location = eta2theta(eta, .llocat, earg = .elocat)
- if (length(location) > extra$n)
- dimnames(location) = list(dimnames(eta)[[1]], extra$y.names)
- location
- }
- }, list( .elocat = elocation, .llocat = llocation,
- .fittedMean = fittedMean, .Scale.arg = Scale.arg,
- .kappa = kappa ))),
- last = eval(substitute(expression({
+ .elocat = elocat, .Scale.arg = Scale.arg,
+ .llocat = llocat, .kappa = kappa,
+ .ilocat = ilocat ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ if ( .fittedMean ) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat + Scale * (1/kappamat - kappamat)
+ } else {
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ if (length(locat) > extra$n)
+ dimnames(locat) = list(dimnames(eta)[[1]], extra$y.names)
+ locat
+ }
+ }, list( .elocat = elocat, .llocat = llocat,
+ .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
Musual <- extra$Musual
+ misc$Musual <- Musual
+ misc$multipleResponses <- TRUE
tmp34 = c(rep( .llocat , length = M))
names(tmp34) = mynames1
misc$link = tmp34 # Already named
misc$earg = vector("list", M)
- misc$Musual <- Musual
names(misc$earg) = names(misc$link)
for(ii in 1:M) {
misc$earg[[ii]] = .elocat
@@ -2900,53 +2886,60 @@ alaplace1.control <- function(maxit = 100, ...)
misc$true.mu = .fittedMean # @fitted is not a true mu?
extra$percentile = numeric(M)
- locat = as.matrix(location)
+ locat = as.matrix(locat)
for(ii in 1:M) {
y.use <- if (ncoly > 1) y[, ii] else y
- extra$percentile[ii] = 100 * weighted.mean(y.use <= locat[, ii], w)
+ extra$percentile[ii] =
+ 100 * weighted.mean(y.use <= locat[, ii], w)
}
names(extra$percentile) = y.names
extra$Scale.arg = .Scale.arg
- }), list( .elocat = elocation,
- .llocat = llocation,
+ }), list( .elocat = elocat,
+ .llocat = llocat,
.Scale.arg = Scale.arg, .fittedMean = fittedMean,
.kappa = kappa ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ymat = matrix(y, extra$n, extra$M)
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- location = eta2theta(eta, .llocat, earg = .elocat)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w * dalap(x = c(ymat), location = c(location),
- scale = c(Scale), kappa = c(kappamat), log = TRUE))
- }
- }, list( .elocat = elocation,
- .llocat = llocation,
- .Scale.arg = Scale.arg, .kappa = kappa ))),
- vfamily = c("alaplace1"),
- deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location = eta2theta(eta, .llocat, earg = .elocat)
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
- zedd = abs(ymat-location) / Scale
- dl.dlocation = ifelse(ymat >= location, kappamat, 1/kappamat) *
- sqrt(2) * sign(ymat - location) / Scale
- dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
- c(w) * cbind(dl.dlocation * dlocation.deta)
- }), list( .Scale.arg = Scale.arg, .elocat = elocation,
- .llocat = llocation, .kappa = kappa ))),
- weight = eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2)
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ymat = matrix(y, extra$n, extra$M)
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- c(w) * wz
- }), list( .Scale.arg = Scale.arg,
- .elocat = elocation, .llocat = llocation ))))
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(c(w) * dalap(x = c(ymat), locat = c(locat),
+ scale = c(Scale), kappa = c(kappamat), log = TRUE))
+ }
+ }, list( .elocat = elocat,
+ .llocat = llocat,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+ vfamily = c("alaplace1"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+
+ kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+ zedd = abs(ymat-locat) / Scale
+
+ dl.dlocat = ifelse(ymat >= locat, kappamat, 1/kappamat) *
+ sqrt(2) * sign(ymat - locat) / Scale
+ dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+
+ c(w) * cbind(dl.dlocat * dlocat.deta)
+ }), list( .Scale.arg = Scale.arg, .elocat = elocat,
+ .llocat = llocat, .kappa = kappa ))),
+
+ weight = eval(substitute(expression({
+ d2l.dlocat2 = 2 / Scale^2
+ wz = cbind(d2l.dlocat2 * dlocat.deta^2)
+
+ c(w) * wz
+ }), list( .Scale.arg = Scale.arg,
+ .elocat = elocat, .llocat = llocat ))))
}
@@ -2959,155 +2952,170 @@ alaplace1.control <- function(maxit = 100, ...)
alaplace3.control <- function(maxit = 100, ...)
{
- list(maxit = maxit)
+ list(maxit = maxit)
}
- alaplace3 = function(
+ alaplace3 <- function(
llocation = "identity", lscale = "loge", lkappa = "loge",
- elocation = list(), escale = list(), ekappa = list(),
ilocation = NULL, iscale = NULL, ikappa = 1.0,
imethod = 1, zero = 2:3) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lkappa) != "character" && mode(lkappa) != "name")
- lkappa = as.character(substitute(lkappa))
-
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) &&
- !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
-
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(ekappa)) ekappa = list()
-
- new("vglmff",
- blurb = c("Three-parameter asymmetric Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("kappa", lkappa, earg = ekappa),
- "\n", "\n",
- "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
- "\n",
- "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("kappa", .lkappa, earg = .ekappa, tag = FALSE))
- if (!length(etastart)) {
- kappa.init = if (length( .ikappa))
- rep( .ikappa, length.out = n) else
- rep( 1.0, length.out = n)
- if ( .imethod == 1) {
- locat.init = median(y)
- scale.init = sqrt(var(y) / 2)
- } else {
- locat.init = y
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
- }
- locat.init = if (length( .ilocat))
- rep( .ilocat, length.out = n) else
- rep(locat.init, length.out = n)
- scale.init = if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(scale.init, length.out = n)
- etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
- theta2eta(scale.init, .lscale, earg = .escale),
- theta2eta(kappa.init, .lkappa, earg = .ekappa))
- }
- }), list( .imethod = imethod,
- .elocat = elocation, .escale = escale, .ekappa = ekappa,
- .llocat = llocation, .lscale = lscale, .lkappa = lkappa,
- .ilocat = ilocation, .iscale = iscale, .ikappa = ikappa ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
- location + Scale * (1/kappa - kappa) / sqrt(2)
- }, list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale,
- .ekappa = ekappa, .lkappa = lkappa ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocat,
- scale = .lscale,
- kappa = .lkappa)
- misc$earg = list(location = .elocat,
- scale = .escale,
- kappa = .ekappa)
- misc$expected = TRUE
- }), list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale,
- .ekappa = ekappa, .lkappa = lkappa ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- kappamat = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w * dalap(x = y, location = location,
- scale=Scale, kappa = kappamat, log = TRUE))
- }
- }, list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale,
- .ekappa = ekappa, .lkappa = lkappa ))),
- vfamily = c("alaplace3"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
- zedd = abs(y-location) / Scale
- dl.dlocation = sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
- sign(y-location) / Scale
- dl.dscale = sqrt(2) * ifelse(y >= location, kappa, 1/kappa) *
- zedd / Scale - 1 / Scale
- dl.dkappa = 1 / kappa - 2 * kappa / (1+kappa^2) -
- (sqrt(2) / Scale) *
- ifelse(y > location, 1, -1/kappa^2) * abs(y-location)
- dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- dkappa.deta = dtheta.deta(kappa, .lkappa, earg = .ekappa)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta,
- dl.dkappa * dkappa.deta)
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation,
- .ekappa = ekappa, .lkappa = lkappa ))),
- weight = eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale^2
- d2l.dscale2 = 1 / Scale^2
- d2l.dkappa2 = 1 / kappa^2 + 4 / (1+kappa^2)^2
- d2l.dkappadloc = -sqrt(8) / ((1+kappa^2) * Scale)
- d2l.dkappadscale = -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
- wz = matrix(0, nrow = n, dimm(M))
- wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- wz[,iam(3,3,M)] = d2l.dkappa2 * dkappa.deta^2
- wz[,iam(1,3,M)] = d2l.dkappadloc * dkappa.deta * dlocation.deta
- wz[,iam(2,3,M)] = d2l.dkappadscale * dkappa.deta * dscale.deta
- c(w) * wz
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation ))))
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lkappa <- as.list(substitute(lkappa))
+ ekappa <- link2list(lkappa)
+ lkappa <- attr(ekappa, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+
+ new("vglmff",
+ blurb = c("Three-parameter asymmetric Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("kappa", lkappa, earg = ekappa),
+ "\n", "\n",
+ "Mean: location + scale * (1/kappa - kappa) / sqrt(2)",
+ "\n",
+ "Variance: Scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale, tag = FALSE),
+ namesof("kappa", .lkappa , earg = .ekappa, tag = FALSE))
+
+ if (!length(etastart)) {
+ kappa.init = if (length( .ikappa ))
+ rep( .ikappa, length.out = n) else
+ rep( 1.0, length.out = n)
+ if ( .imethod == 1) {
+ locat.init = median(y)
+ scale.init = sqrt(var(y) / 2)
+ } else {
+ locat.init = y
+ scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+ }
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = n) else
+ rep(locat.init, length.out = n)
+ scale.init = if (length( .iscale))
+ rep( .iscale, length.out = n) else
+ rep(scale.init, length.out = n)
+ etastart =
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(kappa.init, .lkappa, earg = .ekappa))
+ }
+ }), list( .imethod = imethod,
+ .elocat = elocat, .escale = escale, .ekappa = ekappa,
+ .llocat = llocat, .lscale = lscale, .lkappa = lkappa,
+ .ilocat = ilocat, .iscale = iscale, .ikappa = ikappa ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
+ locat + Scale * (1/kappa - kappa) / sqrt(2)
+ }, list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .ekappa = ekappa, .lkappa = lkappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat ,
+ scale = .lscale ,
+ kappa = .lkappa )
+
+ misc$earg = list(location = .elocat,
+ scale = .escale,
+ kappa = .ekappa )
+
+ misc$expected = TRUE
+ }), list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .ekappa = ekappa, .lkappa = lkappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa = eta2theta(eta[, 3], .lkappa , earg = .ekappa ) # a matrix
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(c(w) * dalap(x = y, locat = locat,
+ scale = Scale, kappa = kappa, log = TRUE))
+ }
+ }, list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .ekappa = ekappa, .lkappa = lkappa ))),
+ vfamily = c("alaplace3"),
+ deriv = eval(substitute(expression({
+ locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ kappa = eta2theta(eta[, 3], .lkappa, earg = .ekappa)
+
+ zedd = abs(y - locat) / Scale
+ dl.dlocat = sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
+ sign(y-locat) / Scale
+ dl.dscale = sqrt(2) * ifelse(y >= locat, kappa, 1/kappa) *
+ zedd / Scale - 1 / Scale
+ dl.dkappa = 1 / kappa - 2 * kappa / (1+kappa^2) -
+ (sqrt(2) / Scale) *
+ ifelse(y > locat, 1, -1/kappa^2) * abs(y-locat)
+
+ dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dkappa.deta = dtheta.deta(kappa, .lkappa, earg = .ekappa)
+
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta,
+ dl.dkappa * dkappa.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat,
+ .ekappa = ekappa, .lkappa = lkappa ))),
+ weight = eval(substitute(expression({
+ d2l.dlocat2 = 2 / Scale^2
+ d2l.dscale2 = 1 / Scale^2
+ d2l.dkappa2 = 1 / kappa^2 + 4 / (1+kappa^2)^2
+ d2l.dkappadloc = -sqrt(8) / ((1+kappa^2) * Scale)
+ d2l.dkappadscale = -(1-kappa^2) / ((1+kappa^2) * kappa * Scale)
+ wz = matrix(0, nrow = n, dimm(M))
+ wz[,iam(1, 1, M)] = d2l.dlocat2 * dlocat.deta^2
+ wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ wz[,iam(3, 3, M)] = d2l.dkappa2 * dkappa.deta^2
+ wz[,iam(1, 3, M)] = d2l.dkappadloc * dkappa.deta * dlocat.deta
+ wz[,iam(2, 3, M)] = d2l.dkappadscale * dkappa.deta * dscale.deta
+ c(w) * wz
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))))
}
@@ -3116,17 +3124,19 @@ alaplace3.control <- function(maxit = 100, ...)
-dlaplace = function(x, location = 0, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dlaplace <- function(x, location = 0, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
+
logdensity = (-abs(x-location)/scale) - log(2*scale)
if (log.arg) logdensity else exp(logdensity)
}
-plaplace = function(q, location = 0, scale = 1) {
+plaplace <- function(q, location = 0, scale = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
zedd = (q-location) / scale
@@ -3139,7 +3149,7 @@ plaplace = function(q, location = 0, scale = 1) {
}
-qlaplace = function(p, location = 0, scale = 1) {
+qlaplace <- function(p, location = 0, scale = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("argument 'scale' must be positive")
L = max(length(p), length(location), length(scale))
@@ -3151,7 +3161,7 @@ qlaplace = function(p, location = 0, scale = 1) {
}
-rlaplace = function(n, location = 0, scale = 1) {
+rlaplace <- function(n, location = 0, scale = 1) {
if (!is.Numeric(n, positive = TRUE,
integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
@@ -3164,114 +3174,131 @@ rlaplace = function(n, location = 0, scale = 1) {
}
- laplace = function(llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
- ilocation = NULL, iscale = NULL,
- imethod = 1, zero = 2) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
-
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
-
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
-
- if (length(iscale) &&
- !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
-
- new("vglmff",
- blurb = c("Two-parameter Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: location", "\n",
+ laplace <- function(llocation = "identity", lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ imethod = 1, zero = 2) {
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+
+ new("vglmff",
+ blurb = c("Two-parameter Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location", "\n",
"Variance: 2*scale^2"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (!length(etastart)) {
- if ( .imethod == 1) {
- locat.init = median(y)
- scale.init = sqrt(var(y) / 2)
- } else if ( .imethod == 2) {
- locat.init = weighted.mean(y, w)
- scale.init = sqrt(var(y) / 2)
- } else {
- locat.init = median(y)
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
- }
- locat.init = if (length( .ilocat))
- rep( .ilocat, length.out = n) else
- rep(locat.init, length.out = n)
- scale.init = if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(scale.init, length.out = n)
- etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat),
- theta2eta(scale.init, .lscale, earg = .escale))
- }
- }), list( .imethod = imethod,
- .elocat = elocation, .escale = escale,
- .llocat = llocation, .lscale = lscale,
- .ilocat = ilocation, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocat, earg = .elocat)
- }, list( .elocat = elocation, .llocat = llocation ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocat, scale = .lscale)
- misc$earg = list(location = .elocat, scale = .escale)
- misc$expected = TRUE
- misc$RegCondOK = FALSE # Save this for later
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w * dlaplace(x = y, location = location, scale=Scale, log = TRUE))
- }
- }, list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation ))),
- vfamily = c("laplace"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta[, 1], .llocat, earg = .elocat)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- zedd = abs(y-location) / Scale
- dl.dlocation = sign(y-location) / Scale
- dl.dscale = zedd / Scale - 1/Scale
- dlocation.deta = dtheta.deta(location, .llocat, earg = .elocat)
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation ))),
- weight = eval(substitute(expression({
- d2l.dlocation2 = d2l.dscale2 = 1 / Scale^2
- wz = matrix(0, nrow = n, ncol=M) # diagonal
- wz[,iam(1,1,M)] = d2l.dlocation2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- c(w) * wz
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation ))))
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale, tag = FALSE))
+
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init = median(y)
+ scale.init = sqrt(var(y) / 2)
+ } else if ( .imethod == 2) {
+ locat.init = weighted.mean(y, w)
+ scale.init = sqrt(var(y) / 2)
+ } else {
+ locat.init = median(y)
+ scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
+ }
+ locat.init = if (length( .ilocat))
+ rep( .ilocat, length.out = n) else
+ rep(locat.init, length.out = n)
+ scale.init = if (length( .iscale))
+ rep( .iscale, length.out = n) else
+ rep(scale.init, length.out = n)
+ etastart =
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
+ }
+ }), list( .imethod = imethod,
+ .elocat = elocat, .escale = escale,
+ .llocat = llocat, .lscale = lscale,
+ .ilocat = ilocat, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
+ }, list( .elocat = elocat, .llocat = llocat ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat , scale = .lscale )
+ misc$earg = list(location = .elocat , scale = .escale )
+ misc$expected = TRUE
+ misc$RegCondOK = FALSE # Save this for later
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(c(w) * dlaplace(x = y, locat = locat,
+ scale = Scale, log = TRUE))
+ }
+ }, list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+ vfamily = c("laplace"),
+ deriv = eval(substitute(expression({
+ Locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+
+ zedd = abs(y-Locat) / Scale
+ dl.dLocat = sign(y - Locat) / Scale
+ dl.dscale = zedd / Scale - 1 / Scale
+
+ dLocat.deta = dtheta.deta(Locat, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+
+ c(w) * cbind(dl.dLocat * dLocat.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))),
+ weight = eval(substitute(expression({
+ d2l.dLocat2 = d2l.dscale2 = 1 / Scale^2
+ wz = matrix(0, nrow = n, ncol=M) # diagonal
+ wz[,iam(1, 1, M)] = d2l.dLocat2 * dLocat.deta^2
+ wz[,iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
+ c(w) * wz
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat ))))
}
@@ -3282,48 +3309,59 @@ fff.control <- function(save.weight = TRUE, ...)
}
- fff = function(link = "loge", earg = list(),
- idf1 = NULL, idf2 = NULL, nsimEIM = 100, # ncp = 0,
- imethod = 1, zero = NULL) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ fff <- function(link = "loge",
+ idf1 = NULL, idf2 = NULL, nsimEIM = 100, # ncp = 0,
+ imethod = 1, zero = NULL) {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(nsimEIM, allowable.length = 1,
- integer.valued = TRUE) ||
- nsimEIM <= 10)
- stop("argument 'nsimEIM' should be an integer greater than 10")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- ncp = 0
- if (any(ncp != 0))
- warning("not sure about ncp != 0 wrt dl/dtheta")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- new("vglmff",
- blurb = c("F-distribution\n\n",
- "Links: ",
- namesof("df1", link, earg = earg), ", ",
- namesof("df2", link, earg = earg),
- "\n", "\n",
- "Mean: df2/(df2-2) provided df2>2 and ncp = 0", "\n",
- "Variance: ",
- "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
- "provided df2>4 and ncp = 0"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE) ||
+ nsimEIM <= 10)
+ stop("argument 'nsimEIM' should be an integer greater than 10")
+
+ ncp = 0
+ if (any(ncp != 0))
+ warning("not sure about ncp != 0 wrt dl/dtheta")
+
+
+
+ new("vglmff",
+ blurb = c("F-distribution\n\n",
+ "Links: ",
+ namesof("df1", link, earg = earg), ", ",
+ namesof("df2", link, earg = earg),
+ "\n", "\n",
+ "Mean: df2/(df2-2) provided df2>2 and ncp = 0", "\n",
+ "Variance: ",
+ "2*df2^2*(df1+df2-2)/(df1*(df2-2)^2*(df2-4)) ",
+ "provided df2>4 and ncp = 0"),
+ constraints = eval(substitute(expression({
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = c(namesof("df1", .link , earg = .earg, tag = FALSE),
- namesof("df2", .link , earg = .earg, tag = FALSE))
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+
+ predictors.names <- c(namesof("df1", .link , earg = .earg , tag = FALSE),
+ namesof("df2", .link , earg = .earg , tag = FALSE))
+
+
if (!length(etastart)) {
if ( .imethod == 1) {
df2.init = b = 2*mean(y) / (mean(y)-1)
@@ -3331,257 +3369,267 @@ fff.control <- function(save.weight = TRUE, ...)
if (df2.init < 4) df2.init = 5
if (df1.init < 2) df1.init = 3
} else {
- df2.init = b = 2*median(y) / (median(y)-1)
- summy = summary(y)
- var.est = summy[5] - summy[2]
- df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
- }
- df1.init = if (length( .idf1))
- rep( .idf1, length.out = n) else
- rep(df1.init, length.out = n)
- df2.init = if (length( .idf2))
- rep( .idf2, length.out = n) else
- rep(1, length.out = n)
- etastart = cbind(theta2eta(df1.init, .link , earg = .earg),
- theta2eta(df2.init, .link , earg = .earg))
+ df2.init = b = 2*median(y) / (median(y)-1)
+ summy = summary(y)
+ var.est = summy[5] - summy[2]
+ df1.init = 2*b^2*(b-2)/(var.est*(b-2)^2 * (b-4) - 2*b^2)
}
- }), list( .imethod = imethod, .idf1=idf1, .earg = earg,
- .idf2=idf2, .link = link ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- df2 = eta2theta(eta[, 2], .link , earg = .earg)
- ans = df2 * NA
- ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
- ans
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(df1 = .link , df2 = .link)
- misc$earg = list(df1 = .earg, df2 = .earg)
- misc$nsimEIM = .nsimEIM
- misc$ncp = .ncp
- }), list( .link = link, .earg = earg,
- .ncp=ncp,
- .nsimEIM = nsimEIM ))),
+ df1.init = if (length( .idf1))
+ rep( .idf1, length.out = n) else
+ rep(df1.init, length.out = n)
+ df2.init = if (length( .idf2))
+ rep( .idf2, length.out = n) else
+ rep(1, length.out = n)
+ etastart = cbind(theta2eta(df1.init, .link , earg = .earg ),
+ theta2eta(df2.init, .link , earg = .earg ))
+ }
+ }), list( .imethod = imethod, .idf1 = idf1, .earg = earg,
+ .idf2 = idf2, .link = link ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ df2 = eta2theta(eta[, 2], .link , earg = .earg )
+ ans = df2 * NA
+ ans[df2>2] = df2[df2>2] / (df2[df2>2]-2)
+ ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(df1 = .link , df2 = .link )
+ misc$earg = list(df1 = .earg , df2 = .earg )
+
+ misc$nsimEIM = .nsimEIM
+ misc$ncp = .ncp
+ }), list( .link = link, .earg = earg,
+ .ncp = ncp,
+ .nsimEIM = nsimEIM ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- df1 = eta2theta(eta[, 1], .link , earg = .earg)
- df2 = eta2theta(eta[, 2], .link , earg = .earg)
+ df1 = eta2theta(eta[, 1], .link , earg = .earg )
+ df2 = eta2theta(eta[, 2], .link , earg = .earg )
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * df(x = y, df1=df1, df2=df2, ncp= .ncp, log = TRUE))
+ sum(c(w) * df(x = y, df1 = df1, df2 = df2,
+ ncp = .ncp, log = TRUE))
}
}, list( .link = link, .earg = earg, .ncp=ncp ))),
vfamily = c("fff"),
deriv = eval(substitute(expression({
- df1 = eta2theta(eta[, 1], .link , earg = .earg)
- df2 = eta2theta(eta[, 2], .link , earg = .earg)
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
- 0.5*log(y) - 0.5*digamma(0.5*df1) -
- 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
- 0.5*digamma(0.5*df2) -
- 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
- 0.5*log1p(df1*y/df2)
- ddf1.deta = dtheta.deta(df1, .link , earg = .earg)
- ddf2.deta = dtheta.deta(df2, .link , earg = .earg)
- dthetas.detas = cbind(ddf1.deta, ddf2.deta)
- w * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
- for(ii in 1:( .nsimEIM )) {
- ysim = rf(n = n, df1=df1, df2=df2)
- dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
- 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
- 0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
- 0.5*log1p(df1*ysim/df2)
- dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
- 0.5*digamma(0.5*df2) -
- 0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
- 0.5*log1p(df1*ysim/df2)
- rm(ysim)
- temp3 = cbind(dl.ddf1, dl.ddf2)
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ df1 = eta2theta(eta[, 1], .link , earg = .earg )
+ df2 = eta2theta(eta[, 2], .link , earg = .earg )
+ dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(y) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(y/df2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*y/df2^2) / (1 + df1*y/df2) -
+ 0.5*log1p(df1*y/df2)
+ ddf1.deta = dtheta.deta(df1, .link , earg = .earg )
+ ddf2.deta = dtheta.deta(df2, .link , earg = .earg )
+ dthetas.detas = cbind(ddf1.deta, ddf2.deta)
+ c(w) * dthetas.detas * cbind(dl.ddf1, dl.ddf2)
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rf(n = n, df1=df1, df2=df2)
+ dl.ddf1 = 0.5*digamma(0.5*(df1+df2)) + 0.5 + 0.5*log(df1/df2) +
+ 0.5*log(ysim) - 0.5*digamma(0.5*df1) -
+ 0.5*(df1+df2)*(ysim/df2) / (1 + df1*ysim/df2) -
+ 0.5*log1p(df1*ysim/df2)
+ dl.ddf2 = 0.5*digamma(0.5*(df1+df2)) - 0.5*df1/df2 -
+ 0.5*digamma(0.5*df2) -
+ 0.5*(df1+df2) * (-df1*ysim/df2^2)/(1 + df1*ysim/df2) -
+ 0.5*log1p(df1*ysim/df2)
+ rm(ysim)
+ temp3 = cbind(dl.ddf1, dl.ddf2)
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = c(w) * wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz
- }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
- .ncp = ncp ))))
+ wz = c(w) * wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
+ wz
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM,
+ .ncp = ncp ))))
}
- hyperg = function(N = NULL, D = NULL,
- lprob = "logit", earg = list(),
- iprob = NULL) {
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- inputN = is.Numeric(N, positive = TRUE)
- inputD = is.Numeric(D, positive = TRUE)
- if (inputD && inputN)
- stop("only one of 'N' and 'D' is to be inputted")
- if (!inputD && !inputN)
- stop("one of 'N' and 'D' needs to be inputted")
- if (!is.list(earg)) earg = list()
+ hyperg <- function(N = NULL, D = NULL,
+ lprob = "logit",
+ iprob = NULL) {
- new("vglmff",
- blurb = c("Hypergeometric distribution\n\n",
- "Link: ",
- namesof("prob", lprob, earg = earg), "\n",
- "Mean: D/N\n"),
- initialize = eval(substitute(expression({
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
- if (NCOL(y) == 1) {
- if (is.factor(y)) y = y != levels(y)[1]
- nn = rep(1, length.out = n)
- if (!all(y >= 0 & y <= 1))
- stop("response values must be in [0, 1]")
- mustart = (0.5 + w * y) / (1 + w)
- no.successes = w * y
- if (any(abs(no.successes - round(no.successes)) > 0.001))
- stop("Number of successes must be integer-valued")
- } else if (NCOL(y) == 2) {
- if (any(abs(y - round(y)) > 0.001))
- stop("Count data must be integer-valued")
- nn = y[, 1] + y[, 2]
- y = ifelse(nn > 0, y[, 1]/nn, 0)
- w = w * nn
- mustart = (0.5 + nn * y) / (1 + nn)
- mustart[mustart >= 1] = 0.95
- } else
- stop("Response not of the right form")
+ inputN = is.Numeric(N, positive = TRUE)
+ inputD = is.Numeric(D, positive = TRUE)
+ if (inputD && inputN)
+ stop("only one of 'N' and 'D' is to be inputted")
+ if (!inputD && !inputN)
+ stop("one of 'N' and 'D' needs to be inputted")
- predictors.names = namesof("prob", .lprob ,
- earg = .earg , tag = FALSE)
- extra$Nvector = .N
- extra$Dvector = .D
- extra$Nunknown = length(extra$Nvector) == 0
- if (!length(etastart)) {
- init.prob = if (length( .iprob))
- rep( .iprob, length.out = n) else
- mustart
+
+ lprob <- as.list(substitute(lprob))
+ earg <- link2list(lprob)
+ lprob <- attr(earg, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Hypergeometric distribution\n\n",
+ "Link: ",
+ namesof("prob", lprob, earg = earg), "\n",
+ "Mean: D/N\n"),
+ initialize = eval(substitute(expression({
+ NCOL = function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
+ if (NCOL(y) == 1) {
+ if (is.factor(y)) y = y != levels(y)[1]
+ nn = rep(1, length.out = n)
+ if (!all(y >= 0 & y <= 1))
+ stop("response values must be in [0, 1]")
+ mustart = (0.5 + w * y) / (1 + w)
+ no.successes = w * y
+ if (any(abs(no.successes - round(no.successes)) > 0.001))
+ stop("Number of successes must be integer-valued")
+ } else if (NCOL(y) == 2) {
+ if (any(abs(y - round(y)) > 0.001))
+ stop("Count data must be integer-valued")
+ nn = y[, 1] + y[, 2]
+ y = ifelse(nn > 0, y[, 1]/nn, 0)
+ w = w * nn
+ mustart = (0.5 + nn * y) / (1 + nn)
+ mustart[mustart >= 1] = 0.95
+ } else
+ stop("Response not of the right form")
+
+ predictors.names <-
+ namesof("prob", .lprob , earg = .earg , tag = FALSE)
+ extra$Nvector = .N
+ extra$Dvector = .D
+ extra$Nunknown = length(extra$Nvector) == 0
+ if (!length(etastart)) {
+ init.prob = if (length( .iprob))
+ rep( .iprob, length.out = n) else
+ mustart
etastart = matrix(init.prob, n, ncol(cbind(y )))
- }
- }), list( .lprob = lprob, .earg = earg, .N = N, .D = D,
- .iprob = iprob ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .lprob, earg = .earg)
- }, list( .lprob = lprob, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c("prob" = .lprob)
- misc$earg <- list("prob" = .earg)
- misc$Dvector <- .D
- misc$Nvector <- .N
- }), list( .N = N, .D = D, .lprob = lprob, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, .lprob, earg = .earg)
- }, list( .lprob = lprob, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- N = extra$Nvector
- Dvec = extra$Dvector
- prob = mu
- yvec = w * y
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- if (extra$Nunknown) {
- tmp12 = Dvec * (1-prob) / prob
+ }
+ }), list( .lprob = lprob, .earg = earg, .N = N, .D = D,
+ .iprob = iprob ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .lprob, earg = .earg )
+ }, list( .lprob = lprob, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c("prob" = .lprob)
+ misc$earg <- list("prob" = .earg )
+ misc$Dvector <- .D
+ misc$Nvector <- .N
+ }), list( .N = N, .D = D, .lprob = lprob, .earg = earg ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ theta2eta(mu, .lprob, earg = .earg )
+ }, list( .lprob = lprob, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ N = extra$Nvector
+ Dvec = extra$Dvector
+ prob = mu
+ yvec = w * y
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ if (extra$Nunknown) {
+ tmp12 = Dvec * (1-prob) / prob
- sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
- lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
+ sum(lgamma(1+tmp12) + lgamma(1+Dvec/prob-w) -
+ lgamma(1+tmp12-w+yvec) - lgamma(1+Dvec/prob))
- } else {
+ } else {
- sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
- lgamma(1+N*prob-yvec) -
- lgamma(1+N*(1-prob) -w + yvec))
- }
- }
- }, list( .lprob = lprob, .earg = earg ))),
- vfamily = c("hyperg"),
- deriv = eval(substitute(expression({
- prob = mu # equivalently, eta2theta(eta, .lprob, earg = .earg)
- dprob.deta = dtheta.deta(prob, .lprob, earg = .earg)
- Dvec = extra$Dvector
- Nvec = extra$Nvector
- yvec = w * y
- if (extra$Nunknown) {
- tmp72 = -Dvec / prob^2
- tmp12 = Dvec * (1-prob) / prob
- dl.dprob = tmp72 * (digamma(1 + tmp12) +
- digamma(1 + Dvec/prob -w) -
- digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
- } else {
- dl.dprob = Nvec * (digamma(1+Nvec*prob) -
- digamma(1+Nvec*(1-prob)) -
- digamma(1+Nvec*prob-yvec) +
- digamma(1+Nvec*(1-prob)-w+yvec))
- }
- w * dl.dprob * dprob.deta
- }), list( .lprob = lprob, .earg = earg ))),
- weight = eval(substitute(expression({
- if (extra$Nunknown) {
- tmp722 = tmp72^2
- tmp13 = 2*Dvec / prob^3
- d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
- trigamma(1 + Dvec/prob - w) -
- trigamma(1 + tmp12 - w + yvec) -
- trigamma(1 + Dvec/prob)) +
- tmp13 * (digamma(1 + tmp12) +
- digamma(1 + Dvec/prob - w) -
- digamma(1 + tmp12 - w + yvec) -
- digamma(1 + Dvec/prob))
- } else {
- d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
- trigamma(1+Nvec*(1-prob)) -
- trigamma(1+Nvec*prob-yvec) -
- trigamma(1+Nvec*(1-prob)-w+yvec))
- }
- d2prob.deta2 = d2theta.deta2(prob, .lprob, earg = .earg)
+ sum(lgamma(1+N*prob) + lgamma(1+N*(1-prob)) -
+ lgamma(1+N*prob-yvec) -
+ lgamma(1+N*(1-prob) -w + yvec))
+ }
+ }
+ }, list( .lprob = lprob, .earg = earg ))),
+ vfamily = c("hyperg"),
+ deriv = eval(substitute(expression({
+ prob = mu # equivalently, eta2theta(eta, .lprob, earg = .earg )
+ dprob.deta = dtheta.deta(prob, .lprob, earg = .earg )
+ Dvec = extra$Dvector
+ Nvec = extra$Nvector
+ yvec = w * y
+ if (extra$Nunknown) {
+ tmp72 = -Dvec / prob^2
+ tmp12 = Dvec * (1-prob) / prob
+ dl.dprob = tmp72 * (digamma(1 + tmp12) +
+ digamma(1 + Dvec/prob -w) -
+ digamma(1 + tmp12-w+yvec) - digamma(1 + Dvec/prob))
+ } else {
+ dl.dprob = Nvec * (digamma(1+Nvec*prob) -
+ digamma(1+Nvec*(1-prob)) -
+ digamma(1+Nvec*prob-yvec) +
+ digamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ c(w) * dl.dprob * dprob.deta
+ }), list( .lprob = lprob, .earg = earg ))),
+ weight = eval(substitute(expression({
+ if (extra$Nunknown) {
+ tmp722 = tmp72^2
+ tmp13 = 2*Dvec / prob^3
+ d2l.dprob2 = tmp722 * (trigamma(1 + tmp12) +
+ trigamma(1 + Dvec/prob - w) -
+ trigamma(1 + tmp12 - w + yvec) -
+ trigamma(1 + Dvec/prob)) +
+ tmp13 * (digamma(1 + tmp12) +
+ digamma(1 + Dvec/prob - w) -
+ digamma(1 + tmp12 - w + yvec) -
+ digamma(1 + Dvec/prob))
+ } else {
+ d2l.dprob2 = Nvec^2 * (trigamma(1+Nvec*prob) +
+ trigamma(1+Nvec*(1-prob)) -
+ trigamma(1+Nvec*prob-yvec) -
+ trigamma(1+Nvec*(1-prob)-w+yvec))
+ }
+ d2prob.deta2 = d2theta.deta2(prob, .lprob, earg = .earg )
- wz = -(dprob.deta^2) * d2l.dprob2
- wz = c(w) * wz
- wz[wz < .Machine$double.eps] = .Machine$double.eps
- wz
+ wz = -(dprob.deta^2) * d2l.dprob2
+ wz = c(w) * wz
+ wz[wz < .Machine$double.eps] = .Machine$double.eps
+ wz
}), list( .lprob = lprob, .earg = earg ))))
}
-dbenini = function(x, shape, y0, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dbenini <- function(x, shape, y0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- N = max(length(x), length(shape), length(y0))
- x = rep(x, length.out = N);
- shape = rep(shape, length.out = N);
- y0 = rep(y0, length.out = N);
- logdensity = rep(log(0), length.out = N)
- xok = (x > y0)
- tempxok = log(x[xok]/y0[xok])
- logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
- log(tempxok) - log(x[xok])
- if (log.arg) logdensity else exp(logdensity)
+
+ N = max(length(x), length(shape), length(y0))
+ x = rep(x, length.out = N);
+ shape = rep(shape, length.out = N);
+ y0 = rep(y0, length.out = N);
+
+ logdensity = rep(log(0), length.out = N)
+ xok = (x > y0)
+ tempxok = log(x[xok]/y0[xok])
+ logdensity[xok] = log(2*shape[xok]) - shape[xok] * tempxok^2 +
+ log(tempxok) - log(x[xok])
+ if (log.arg) logdensity else exp(logdensity)
}
-pbenini = function(q, shape, y0) {
+pbenini <- function(q, shape, y0) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(shape, positive = TRUE))
@@ -3600,7 +3648,7 @@ pbenini = function(q, shape, y0) {
}
-qbenini = function(p, shape, y0) {
+qbenini <- function(p, shape, y0) {
if (!is.Numeric(p, positive = TRUE) ||
any(p >= 1))
stop("bad input for argument 'p'")
@@ -3612,151 +3660,162 @@ qbenini = function(p, shape, y0) {
}
-rbenini = function(n, shape, y0) {
+rbenini <- function(n, shape, y0) {
y0 * exp(sqrt(-log(runif(n)) / shape))
}
- benini = function(y0 = stop("argument 'y0' must be specified"),
- lshape = "loge", earg = list(),
- ishape = NULL, imethod = 1) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(y0, allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'y0'")
- if (!is.list(earg)) earg = list()
+ benini <- function(y0 = stop("argument 'y0' must be specified"),
+ lshape = "loge",
+ ishape = NULL, imethod = 1, zero = NULL) {
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
- new("vglmff",
- blurb = c("1-parameter Benini distribution\n\n",
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (!is.Numeric(y0, positive = TRUE))
+ stop("bad input for argument 'y0'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ new("vglmff",
+ blurb = c("1-parameter Benini distribution\n\n",
"Link: ",
- namesof("shape", lshape, earg = earg),
- "\n", "\n"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape", .lshape, earg = .earg, tag = FALSE))
- extra$y0 = .y0
- if (min(y) <= extra$y0)
- stop("argument 'y0' is too large")
- if (!length(etastart)) {
- probs = (1:3) / 4
- qofy = quantile(rep(y, times=w), probs=probs)
- if ( .imethod == 1) {
- shape.init = mean(-log1p(-probs) / (log(qofy))^2)
- } else {
- shape.init = median(-log1p(-probs) / (log(qofy))^2)
- }
- shape.init = if (length( .ishape))
- rep( .ishape, length.out = n) else
- rep(shape.init, length.out = n)
- etastart = cbind(theta2eta(shape.init, .lshape, earg = .earg))
- }
- }), list( .imethod = imethod,
- .ishape = ishape,
- .lshape = lshape, .earg = earg,
- .y0=y0 ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .lshape, earg = .earg)
- temp = 1/(4*shape)
- extra$y0 * exp(temp) *
- ((sqrt(pi) * pgamma(temp, 0.5, lower.tail = FALSE)) / (2*sqrt(shape)) +
- pgamma(temp, 1.0, lower.tail = FALSE))
- }, list( .lshape = lshape, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(shape = .lshape)
- misc$earg = list(shape = .earg )
- }), list( .lshape = lshape, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .lshape, earg = .earg)
- y0 = extra$y0
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w * dbenini(x = y, shape=shape, y0=y0, log = TRUE))
- }
- }, list( .lshape = lshape, .earg = earg ))),
- vfamily = c("benini"),
- deriv = eval(substitute(expression({
- shape = eta2theta(eta, .lshape, earg = .earg)
- y0 = extra$y0
- dl.dshape = 1/shape - (log(y/y0))^2
- dshape.deta = dtheta.deta(shape, .lshape, earg = .earg)
- w * dl.dshape * dshape.deta
- }), list( .lshape = lshape, .earg = earg ))),
- weight = eval(substitute(expression({
- d2l.dshape2 = 1 / shape^2
- wz = d2l.dshape2 * dshape.deta^2
- c(w) * wz
- }), list( .lshape = lshape, .earg = earg ))))
-}
+ namesof("shape", lshape, earg = eshape),
+ "\n", "\n",
+ "Median: qbenini(p = 0.5, shape, y0)"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ lshape = .lshape ,
+ eshape = .eshape)
+ }, list( .eshape = eshape,
+ .lshape = lshape ))),
+ initialize = eval(substitute(expression({
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
-if (FALSE)
-dpolono = function(x, meanlog = 0, sdlog = 1, bigx = Inf, ...) {
- if (!is.Numeric(x))
- stop("bad input for argument 'x'")
- if (!is.Numeric(meanlog))
- stop("bad input for argument 'meanlog'")
- if (!is.Numeric(sdlog, positive = TRUE))
- stop("bad input for argument 'sdlog'")
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
- if (length(bigx) != 1)
- stop("bad input for argument 'bigx'")
- if (bigx < 10)
- warning("argument 'bigx' is probably too small")
- N = max(length(x), length(meanlog), length(sdlog))
- x = rep(x, length.out = N);
- meanlog = rep(meanlog, length.out = N);
- sdlog = rep(sdlog, length.out = N)
- ans = x * 0
- integrand = function(t, x, meanlog, sdlog)
- exp(t*x - exp(t) - 0.5*((t-meanlog)/sdlog)^2)
- for(ii in 1:N) {
- if (x[ii] == round(x[ii]) && x[ii] >= 0) {
- if (x[ii] >= bigx) {
- zedd = (log(x[ii])-meanlog[ii]) / sdlog[ii]
- temp = 1 + (zedd^2 + log(x[ii]) - meanlog[ii] -
- 1) / (2*x[ii]*(sdlog[ii])^2)
- ans[ii] = temp * exp(-0.5*zedd^2)/(sqrt(2*pi)*
- sdlog[ii] * x[ii])
- } else {
- temp = integrate(f=integrand, lower=-Inf,
- upper = Inf, x = x[ii],
- meanlog=meanlog[ii],
- sdlog = sdlog[ii], ...)
- if (temp$message == "OK") {
- ans[ii] = temp$value / (sqrt(2*pi) * sdlog[ii] *
- exp(lgamma(x[ii]+1)))
- } else {
- warning("could not integrate (numerically) observation ",
- ii)
- ans[ii] = NA
- }
- }
- }
+ mynames1 <- paste("shape", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .lshape , earg = .eshape , tag = FALSE)
+
+ extra$y0 <- matrix( .y0 , n, ncoly, byrow = TRUE)
+ if (any(y <= extra$y0))
+ stop("some values of the response are > argument 'y0' values")
+
+
+ if (!length(etastart)) {
+ probs.y = (1:3) / 4
+ qofy = quantile(rep(y, times = w), probs = probs.y)
+ if ( .imethod == 1) {
+ shape.init <- mean(-log1p(-probs.y) / (log(qofy))^2)
+ } else {
+ shape.init <- median(-log1p(-probs.y) / (log(qofy))^2)
+ }
+ shape.init <- matrix(if (length( .ishape )) .ishape else shape.init,
+ n, ncoly, byrow = TRUE)
+ etastart <- cbind(theta2eta(shape.init, .lshape , earg = .eshape ))
+ }
+ }), list( .imethod = imethod,
+ .ishape = ishape,
+ .lshape = lshape, .eshape = eshape,
+ .y0 = y0 ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape , earg = .eshape )
+
+
+ qbenini(p = 0.5, shape, y0 = extra$y0)
+ }, list( .lshape = lshape, .eshape = eshape ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lshape , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .eshape
}
- ans
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+
+
+ extra$y0 <- .y0
+
+ }), list( .lshape = lshape,
+ .eshape = eshape, .y0 = y0 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape , earg = .eshape )
+ y0 = extra$y0
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(c(w) * dbenini(x = y, shape=shape, y0 = y0, log = TRUE))
+ }
+ }, list( .lshape = lshape, .eshape = eshape ))),
+ vfamily = c("benini"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .lshape , earg = .eshape )
+
+ y0 = extra$y0
+ dl.dshape = 1/shape - (log(y/y0))^2
+
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+
+ c(w) * dl.dshape * dshape.deta
+ }), list( .lshape = lshape, .eshape = eshape ))),
+ weight = eval(substitute(expression({
+ ned2l.dshape2 = 1 / shape^2
+ wz = ned2l.dshape2 * dshape.deta^2
+ c(w) * wz
+ }), list( .lshape = lshape, .eshape = eshape ))))
}
+
+
dpolono <- function (x, meanlog = 0, sdlog = 1, bigx = 170, ...) {
mapply(function(x, meanlog, sdlog, ...) {
- if (abs(x) > floor(x)) { # zero prob for -ve or non-integer
+ if (abs(x) > floor(x)) { # zero prob for -ve or non-integer
0
} else
if (x > bigx) {
@@ -3778,16 +3837,16 @@ ppolono <- function(q, meanlog = 0, sdlog = 1,
isOne = 1 - sqrt( .Machine$double.eps ), ...) {
- .cumprob <- rep(0, length(q))
- .cumprob[q == Inf] <- 1 # special case
+ .cumprob <- rep(0, length(q))
+ .cumprob[q == Inf] <- 1 # special case
- q <- floor(q)
- i <- -1
- while (any(xActive <- ((.cumprob < isOne) & (q > i))))
- .cumprob[xActive] <- .cumprob[xActive] +
- dpolono(i <- (i+1), meanlog, sdlog, ...)
- .cumprob
+ q <- floor(q)
+ ii <- -1
+ while (any(xActive <- ((.cumprob < isOne) & (q > ii))))
+ .cumprob[xActive] <- .cumprob[xActive] +
+ dpolono(ii <- (ii+1), meanlog, sdlog, ...)
+ .cumprob
}
@@ -3798,9 +3857,9 @@ ppolono <- function(q, meanlog = 0, sdlog = 1,
-rpolono = function(n, meanlog = 0, sdlog = 1) {
+rpolono <- function(n, meanlog = 0, sdlog = 1) {
lambda = rlnorm(n = n, meanlog = meanlog, sdlog = sdlog)
- rpois(n = n, lambda = lambda)
+ rpois(n = n, lambda = lambda)
}
@@ -3813,34 +3872,35 @@ rpolono = function(n, meanlog = 0, sdlog = 1) {
-dtriangle = function(x, theta, lower = 0, upper = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dtriangle <- function(x, theta, lower = 0, upper = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- N = max(length(x), length(theta), length(lower), length(upper))
- x = rep(x, length.out = N);
- lower = rep(lower, length.out = N);
- upper = rep(upper, length.out = N);
- theta = rep(theta, length.out = N)
- denom1 = ((upper-lower)*(theta-lower))
- denom2 = ((upper-lower)*(upper-theta))
- logdensity = rep(log(0), length.out = N)
- xok.neg = (lower < x) & (x <= theta)
- xok.pos = (theta <= x) & (x < upper)
- logdensity[xok.neg] =
- log(2 * (x[xok.neg] - lower[xok.neg]) / denom1[xok.neg])
- logdensity[xok.pos] =
- log(2 * (upper[xok.pos] - x[xok.pos]) / denom2[xok.pos])
- logdensity[lower >= upper] = NaN
- logdensity[lower > theta] = NaN
- logdensity[upper < theta] = NaN
- if (log.arg) logdensity else exp(logdensity)
+ N = max(length(x), length(theta), length(lower), length(upper))
+ x = rep(x, length.out = N);
+ lower = rep(lower, length.out = N);
+ upper = rep(upper, length.out = N);
+ theta = rep(theta, length.out = N)
+
+ denom1 = ((upper-lower)*(theta-lower))
+ denom2 = ((upper-lower)*(upper-theta))
+ logdensity = rep(log(0), length.out = N)
+ xok.neg = (lower < x) & (x <= theta)
+ xok.pos = (theta <= x) & (x < upper)
+ logdensity[xok.neg] =
+ log(2 * (x[xok.neg] - lower[xok.neg]) / denom1[xok.neg])
+ logdensity[xok.pos] =
+ log(2 * (upper[xok.pos] - x[xok.pos]) / denom2[xok.pos])
+ logdensity[lower >= upper] = NaN
+ logdensity[lower > theta] = NaN
+ logdensity[upper < theta] = NaN
+ if (log.arg) logdensity else exp(logdensity)
}
-rtriangle = function(n, theta, lower = 0, upper = 1) {
+rtriangle <- function(n, theta, lower = 0, upper = 1) {
if (!is.Numeric(n, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'n'")
if (!is.Numeric(theta))
@@ -3851,6 +3911,7 @@ rtriangle = function(n, theta, lower = 0, upper = 1) {
stop("bad input for argument 'upper'")
if (!all(lower < theta & theta < upper))
stop("lower < theta < upper values are required")
+
N = n
lower = rep(lower, length.out = N);
upper = rep(upper, length.out = N);
@@ -3863,7 +3924,7 @@ rtriangle = function(n, theta, lower = 0, upper = 1) {
}
-qtriangle = function(p, theta, lower = 0, upper = 1) {
+qtriangle <- function(p, theta, lower = 0, upper = 1) {
if (!is.Numeric(p, positive = TRUE))
stop("bad input for argument 'p'")
if (!is.Numeric(theta))
@@ -3899,13 +3960,13 @@ qtriangle = function(p, theta, lower = 0, upper = 1) {
qstar = ifelse(qstar[, 1] >= 0 & qstar[, 1] <= 1,
qstar[, 1],
qstar[, 2])
- ans[Pos] = theta[Pos] + qstar * (upper-theta)[Pos]
+ ans[Pos] = theta[Pos] + qstar * (upper - theta)[Pos]
}
ans
}
-ptriangle = function(q, theta, lower = 0, upper = 1) {
+ptriangle <- function(q, theta, lower = 0, upper = 1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(theta))
@@ -3938,10 +3999,9 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
- triangle = function(lower = 0, upper = 1,
- link = "elogit", earg = if (link == "elogit")
- list(min = lower, max = upper) else list(),
- itheta = NULL)
+ triangle <- function(lower = 0, upper = 1,
+ link = elogit(min = lower, max = upper),
+ itheta = NULL)
{
if (!is.Numeric(lower))
stop("bad input for argument 'lower'")
@@ -3949,35 +4009,50 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
stop("bad input for argument 'upper'")
if (!all(lower < upper))
stop("lower < upper values are required")
+
if (length(itheta) && !is.Numeric(itheta))
stop("bad input for 'itheta'")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
new("vglmff",
blurb = c(
"Triangle distribution\n\n",
"Link: ",
namesof("theta", link, earg = earg)),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ link = .link )
+ }, list( .link = link ))),
+
initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+
extra$lower = rep( .lower, length.out = n)
extra$upper = rep( .upper, length.out = n)
if (any(y <= extra$lower | y >= extra$upper))
stop("some y values in [lower,upper] detected")
- predictors.names =
- namesof("theta", .link , earg = .earg, tag = FALSE)
+
+ predictors.names <-
+ namesof("theta", .link , earg = .earg , tag = FALSE)
+
+
if (!length(etastart)) {
- Theta.init = if (length( .itheta)) .itheta else {
- weighted.mean(y, w)
- }
- Theta.init = rep(Theta.init, length = n)
- etastart = theta2eta(Theta.init, .link , earg = .earg )
+ Theta.init = if (length( .itheta )) .itheta else {
+ weighted.mean(y, w)
+ }
+ Theta.init = rep(Theta.init, length = n)
+ etastart = theta2eta(Theta.init, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .itheta=itheta,
.upper = upper, .lower = lower ))),
@@ -3992,8 +4067,9 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
mu
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(theta = .link)
- misc$earg = list(theta = .earg)
+ misc$link = c(theta = .link )
+ misc$earg = list(theta = .earg )
+
misc$expected = TRUE
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
@@ -4004,26 +4080,30 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- sum(w * dtriangle(x = y, theta=Theta, lower = lower,
- upper = upper, log = TRUE))
+ sum(c(w) * dtriangle(x = y, theta = Theta, lower = lower,
+ upper = upper, log = TRUE))
}
}, list( .link = link, .earg = earg ))),
vfamily = c("triangle"),
deriv = eval(substitute(expression({
Theta = eta2theta(eta, .link , earg = .earg )
+
dTheta.deta = dtheta.deta(Theta, .link , earg = .earg )
+
pos = y > Theta
neg = y < Theta
lower = extra$lower
upper = extra$upper
+
dl.dTheta = 0 * y
dl.dTheta[neg] = -1 / (Theta[neg]-lower[neg])
dl.dTheta[pos] = 1 / (upper[pos]-Theta[pos])
- dl.dTheta * dTheta.deta
+
+ w * dl.dTheta * dTheta.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2l.dTheta2 = 1 / ((Theta-lower)*(upper-Theta))
- wz = dTheta.deta^2 * d2l.dTheta2
+ d2l.dTheta2 = 1 / ((Theta - lower) * (upper - Theta))
+ wz = d2l.dTheta2 * dTheta.deta^2
c(w) * wz
}), list( .link = link, .earg = earg ))))
}
@@ -4034,21 +4114,20 @@ ptriangle = function(q, theta, lower = 0, upper = 1) {
-adjust0.loglaplace1 = function(ymat, y, w, rep0) {
- rangey0 = range(y[y > 0])
- ymat[ymat <= 0] = min(rangey0[1] / 2, rep0)
- ymat
+adjust0.loglaplace1 <- function(ymat, y, w, rep0) {
+ rangey0 = range(y[y > 0])
+ ymat[ymat <= 0] = min(rangey0[1] / 2, rep0)
+ ymat
}
loglaplace1.control <- function(maxit = 300, ...)
{
- list(maxit = maxit)
+ list(maxit = maxit)
}
- loglaplace1 = function(tau = NULL,
+ loglaplace1 <- function(tau = NULL,
llocation = "loge",
- elocation = list(),
ilocation = NULL,
kappa = sqrt(tau/(1-tau)),
Scale.arg = 1,
@@ -4059,77 +4138,109 @@ loglaplace1.control <- function(maxit = 300, ...)
minquantile = 0, maxquantile = Inf,
imethod = 1, zero = NULL) {
- if (length(minquantile) != 1)
- stop("bad input for argument 'minquantile'")
- if (length(maxquantile) != 1)
- stop("bad input for argument 'maxquantile'")
- if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) ||
- rep0 > 1)
- stop("bad input for argument 'rep0'")
- if (!is.Numeric(kappa, positive = TRUE))
- stop("bad input for argument 'kappa'")
-
- if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
- stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1, 2 or ... 4")
-
- if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
- stop("bad input for argument 'shrinkage.init'")
-
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(Scale.arg, positive = TRUE))
- stop("bad input for argument 'Scale.arg'")
- if (!is.logical(parallelLocation) ||
- length(parallelLocation) != 1)
- stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
- if (!is.logical(fittedMean) || length(fittedMean) != 1)
- stop("bad input for argument 'fittedMean'")
-
- mystring0 = namesof("location", llocation, earg = elocation)
- mychars = substring(mystring0, first = 1:nchar(mystring0),
- last = 1:nchar(mystring0))
- mychars[nchar(mystring0)] = ", inverse = TRUE)"
- mystring1 = paste(mychars, collapse = "")
-
-
- new("vglmff",
- blurb = c("One-parameter ",
- if (llocation == "loge") "log-Laplace" else
- c(llocation, "-Laplace"),
- " distribution\n\n",
- "Links: ", mystring0, "\n", "\n",
- "Quantiles: ", mystring1),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
- constraints, intercept = FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallelLocation = parallelLocation,
- .Scale.arg = Scale.arg, .zero = zero ))),
- initialize = eval(substitute(expression({
- extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length = M)
- extra$kappa = rep( .kappa, length = M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ if (length(minquantile) != 1)
+ stop("bad input for argument 'minquantile'")
+ if (length(maxquantile) != 1)
+ stop("bad input for argument 'maxquantile'")
+
+
+ if (!is.Numeric(rep0, positive = TRUE, allowable.length = 1) ||
+ rep0 > 1)
+ stop("bad input for argument 'rep0'")
+ if (!is.Numeric(kappa, positive = TRUE))
+ stop("bad input for argument 'kappa'")
+
+ if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+
+ llocat.identity <- as.list(substitute("identity"))
+ elocat.identity <- link2list(llocat.identity)
+ llocat.identity <- attr(elocat.identity, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1, 2 or ... 4")
+
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+ if (length(zero) &&
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
+ is.character(zero )))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(Scale.arg, positive = TRUE))
+ stop("bad input for argument 'Scale.arg'")
+ if (!is.logical(parallelLocation) ||
+ length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) || length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+
+ mystring0 = namesof("location", llocat, earg = elocat)
+ mychars = substring(mystring0, first = 1:nchar(mystring0),
+ last = 1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse = TRUE)"
+ mystring1 = paste(mychars, collapse = "")
+
+
+
+
+ new("vglmff",
+ blurb = c("One-parameter ",
+ if (llocat == "loge") "log-Laplace" else
+ c(llocat, "-Laplace"),
+ " distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
+ "Quantiles: ", mystring1),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, intercept = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation = parallelLocation,
+ .Scale.arg = Scale.arg, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale = rep( .Scale.arg, length = M)
+ extra$kappa = rep( .kappa, length = M)
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
extra$n = n
extra$y.names = y.names =
paste("tau = ", round(extra$tau, digits = .digt), sep = "")
extra$individual = FALSE
- predictors.names = namesof(paste("quantile(", y.names, ")", sep = ""),
- .llocat, earg = .elocat, tag = FALSE)
+
+
+ predictors.names <-
+ namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat , earg = .elocat , tag = FALSE)
if (FALSE) {
@@ -4167,97 +4278,109 @@ loglaplace1.control <- function(maxit = 300, ...)
if ( .llocat == "loge")
locat.init = abs(locat.init)
etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat))
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
}
}), list( .imethod = imethod,
.dfmu.init = dfmu.init, .rep0 = rep0,
.sinit = shrinkage.init, .digt = digt,
- .elocat = elocation, .Scale.arg = Scale.arg,
- .llocat = llocation, .kappa = kappa,
- .ilocat = ilocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location.y = eta2theta(eta, .llocat, earg = .elocat)
- if ( .fittedMean ) {
- stop("Yet to do: handle 'fittedMean = TRUE'")
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location.y + Scale * (1/kappamat - kappamat)
- } else {
- if (length(location.y) > extra$n)
- dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
- location.y
- }
- location.y[location.y < .minquantile] = .minquantile
- location.y[location.y > .maxquantile] = .maxquantile
- location.y
- }, list( .elocat = elocation, .llocat = llocation,
- .minquantile = minquantile, .maxquantile = maxquantile,
- .fittedMean = fittedMean, .Scale.arg = Scale.arg,
- .kappa = kappa ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocat)
- misc$earg = list(location = .elocat)
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- extra$Scale.arg = .Scale.arg
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$rep0 = .rep0
- misc$minquantile = .minquantile
- misc$maxquantile = .maxquantile
- extra$percentile = numeric(length(misc$kappa))
- location.y = as.matrix(location.y)
- for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 * weighted.mean(y <= location.y[,ii], w)
- }), list( .elocat = elocation, .llocat = llocation,
- .Scale.arg = Scale.arg, .fittedMean = fittedMean,
- .minquantile = minquantile, .maxquantile = maxquantile,
- .rep0 = rep0, .kappa = kappa ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- ymat = matrix(y, extra$n, extra$M)
+ .elocat = elocat, .Scale.arg = Scale.arg,
+ .llocat = llocat, .kappa = kappa,
+ .ilocat = ilocat ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ locat.y = eta2theta(eta, .llocat , earg = .elocat )
+ if ( .fittedMean ) {
+ stop("Yet to do: handle 'fittedMean = TRUE'")
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.y + Scale * (1/kappamat - kappamat)
+ } else {
+ if (length(locat.y) > extra$n)
+ dimnames(locat.y) = list(dimnames(eta)[[1]], extra$y.names)
+ locat.y
+ }
+ locat.y[locat.y < .minquantile] = .minquantile
+ locat.y[locat.y > .maxquantile] = .maxquantile
+ locat.y
+ }, list( .elocat = elocat, .llocat = llocat,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat)
+ misc$earg = list(location = .elocat )
+ misc$expected = TRUE
+
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg = .Scale.arg
+
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$rep0 = .rep0
+ misc$minquantile = .minquantile
+ misc$maxquantile = .maxquantile
+
+ extra$percentile = numeric(length(misc$kappa))
+ locat.y = as.matrix(locat.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 * weighted.mean(y <= locat.y[, ii], w)
+ }), list( .elocat = elocat, .llocat = llocat,
+ .Scale.arg = Scale.arg, .fittedMean = fittedMean,
+ .minquantile = minquantile, .maxquantile = maxquantile,
+ .rep0 = rep0, .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat = matrix(y, extra$n, extra$M)
- if ( .llocat == "loge")
- ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
- w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logoff()
+ if ( .llocat == "loge")
+ ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logoff()
if (residuals) {
stop("loglikelihood residuals not implemented yet")
} else {
- ALDans = sum(w * dalap(x = c(w.mat), location = c(eta),
+ ALDans = sum(c(w) * dalap(x = c(w.mat), locat = c(eta),
scale = c(Scale.w), kappa = c(kappamat),
log = TRUE))
ALDans
}
- }, list( .elocat = elocation, .llocat = llocation,
- .rep0 = rep0,
- .Scale.arg = Scale.arg, .kappa = kappa ))),
- vfamily = c("loglaplace1"),
- deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location.w = eta
- location.y = eta2theta(location.w, .llocat, earg = .elocat)
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
-
- ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
- w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
- zedd = abs(w.mat-location.w) / Scale.w
- dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
- sqrt(2) * sign(w.mat-location.w) / Scale.w
- dlocation.deta = dtheta.deta(location.w, "identity", earg = .elocat)
- c(w) * cbind(dl.dlocation * dlocation.deta)
- }), list( .Scale.arg = Scale.arg, .elocat = elocation,
- .rep0 = rep0,
- .llocat = llocation, .kappa = kappa ))),
- weight = eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale.w^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2)
- c(w) * wz
- }), list( .Scale.arg = Scale.arg,
- .elocat = elocation, .llocat = llocation ))))
+ }, list( .elocat = elocat, .llocat = llocat,
+ .rep0 = rep0,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+ vfamily = c("loglaplace1"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.w = eta
+ locat.y = eta2theta(locat.w, .llocat , earg = .elocat )
+ kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+
+ ymat = adjust0.loglaplace1(ymat = ymat, y = y, w = w, rep0= .rep0)
+ w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ zedd = abs(w.mat-locat.w) / Scale.w
+ dl.dlocat = ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ sqrt(2) * sign(w.mat-locat.w) / Scale.w
+
+
+ dlocat.deta = dtheta.deta(locat.w,
+ .llocat.identity ,
+ earg = .elocat.identity )
+ c(w) * cbind(dl.dlocat * dlocat.deta)
+ }), list( .Scale.arg = Scale.arg, .rep0 = rep0,
+ .llocat = llocat, .elocat = elocat,
+ .elocat.identity = elocat.identity,
+ .llocat.identity = llocat.identity,
+
+ .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ ned2l.dlocat2 = 2 / Scale.w^2
+ wz = cbind(ned2l.dlocat2 * dlocat.deta^2)
+ c(w) * wz
+ }), list( .Scale.arg = Scale.arg,
+ .elocat = elocat, .llocat = llocat,
+ .elocat.identity = elocat.identity,
+ .llocat.identity = llocat.identity ))))
}
@@ -4266,19 +4389,19 @@ loglaplace1.control <- function(maxit = 300, ...)
loglaplace2.control <- function(save.weight = TRUE, ...)
{
- list(save.weight = save.weight)
+ list(save.weight = save.weight)
}
- loglaplace2 = function(tau = NULL,
- llocation = "loge", lscale = "loge",
- elocation = list(), escale = list(),
- ilocation = NULL, iscale = NULL,
- kappa = sqrt(tau/(1-tau)),
- shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
- sameScale = TRUE,
- dfmu.init = 3,
- rep0 = 0.5, nsimEIM = NULL,
- imethod = 1, zero = "(1 + M/2):M") {
+ loglaplace2 <- function(tau = NULL,
+ llocation = "loge", lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ kappa = sqrt(tau/(1-tau)),
+ shrinkage.init = 0.95,
+ parallelLocation = FALSE, digt = 4,
+ eq.scale = TRUE,
+ dfmu.init = 3,
+ rep0 = 0.5, nsimEIM = NULL,
+ imethod = 1, zero = "(1 + M/2):M") {
warning("it is best to use loglaplace1()")
if (length(nsimEIM) &&
@@ -4294,10 +4417,18 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -4306,8 +4437,6 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
shrinkage.init < 0 ||
@@ -4317,71 +4446,87 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
!(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
is.character(zero )))
stop("bad input for argument 'zero'")
- if (!is.logical(sameScale) || length(sameScale) != 1)
- stop("bad input for argument 'sameScale'")
- if (!is.logical(parallelLocation) || length(parallelLocation) != 1)
+ if (!is.logical(eq.scale) || length(eq.scale) != 1)
+ stop("bad input for argument 'eq.scale'")
+ if (!is.logical(parallelLocation) ||
+ length(parallelLocation) != 1)
stop("bad input for argument 'parallelLocation'")
fittedMean = FALSE
if (!is.logical(fittedMean) || length(fittedMean) != 1)
stop("bad input for argument 'fittedMean'")
- if (llocation != "loge")
- stop("argument 'llocation' must be \"loge\"")
+ if (llocat != "loge")
+ stop("argument 'llocat' must be \"loge\"")
- new("vglmff",
- blurb = c("Two-parameter log-Laplace distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: zz location + scale * ",
- "(1/kappa - kappa) / sqrt(2)", "\n",
- "Quantiles: location", "\n",
- "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
- constraints = eval(substitute(expression({
- .ZERO = .zero
- if (is.character( .ZERO)) .ZERO = eval(parse(text = .ZERO))
- .PARALLEL = .parallelLocation
- parelHmat = if (is.logical( .PARALLEL ) && .PARALLEL )
- matrix(1, M/2, 1) else diag(M/2)
- scaleHmat = if (is.logical( .sameScale ) && .sameScale )
- matrix(1, M/2, 1) else diag(M/2)
- mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
- rbind(0*scaleHmat, scaleHmat))
- constraints = cm.vgam(mycmatrix, x, .PARALLEL, constraints,
- int = FALSE)
- constraints = cm.zero.vgam(constraints, x, .ZERO, M)
-
- if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
- parelHmat = diag(M/2)
- mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
- rbind(0*scaleHmat, scaleHmat))
- constraints[["(Intercept)"]] = mycmatrix
- }
- if (is.logical( .sameScale) && .sameScale &&
- names(constraints)[1] == "(Intercept)") {
- temp3 = constraints[["(Intercept)"]]
- temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
- constraints[["(Intercept)"]] = temp3
- }
- }), list( .sameScale=sameScale, .parallelLocation = parallelLocation,
+ new("vglmff",
+ blurb = c("Two-parameter log-Laplace distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: zz location + scale * ",
+ "(1/kappa - kappa) / sqrt(2)", "\n",
+ "Quantiles: location", "\n",
+ "Variance: zz scale^2 * (1 + kappa^4) / (2 * kappa^2)"),
+ constraints = eval(substitute(expression({
+ .ZERO = .zero
+ if (is.character( .ZERO)) .ZERO = eval(parse(text = .ZERO))
+ .PARALLEL = .parallelLocation
+ parelHmat = if (is.logical( .PARALLEL ) && .PARALLEL )
+ matrix(1, M/2, 1) else diag(M/2)
+ scaleHmat = if (is.logical( .eq.scale ) && .eq.scale )
+ matrix(1, M/2, 1) else diag(M/2)
+ mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints = cm.vgam(mycmatrix, x, .PARALLEL, constraints,
+ int = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .ZERO, M)
+
+ if ( .PARALLEL && names(constraints)[1] == "(Intercept)") {
+ parelHmat = diag(M/2)
+ mycmatrix = cbind(rbind( parelHmat, 0*parelHmat),
+ rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = mycmatrix
+ }
+ if (is.logical( .eq.scale) && .eq.scale &&
+ names(constraints)[1] == "(Intercept)") {
+ temp3 = constraints[["(Intercept)"]]
+ temp3 = cbind(temp3[,1:(M/2)], rbind(0*scaleHmat, scaleHmat))
+ constraints[["(Intercept)"]] = temp3
+ }
+ }), list( .eq.scale = eq.scale, .parallelLocation = parallelLocation,
.zero = zero ))),
- initialize = eval(substitute(expression({
- extra$kappa = .kappa
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$M = M = 2 * length(extra$kappa)
- extra$n = n
- extra$y.names = y.names =
- paste("tau = ", round(extra$tau, digits = .digt), sep = "")
- extra$individual = FALSE
- predictors.names =
- c(namesof(paste("quantile(", y.names, ")", sep = ""),
- .llocat, earg = .elocat, tag = FALSE),
- namesof(if (M == 2) "scale" else paste("scale", 1:(M/2), sep = ""),
- .lscale, earg = .escale, tag = FALSE))
+ initialize = eval(substitute(expression({
+ extra$kappa = .kappa
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+ extra$M = M = 2 * length(extra$kappa)
+ extra$n = n
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
+ extra$individual = FALSE
+
+ predictors.names <-
+ c(namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat , earg = .elocat, tag = FALSE),
+ namesof(if (M == 2) "scale" else
+ paste("scale", 1:(M/2), sep = ""),
+ .lscale , earg = .escale, tag = FALSE))
if (weighted.mean(1 * (y < 0.001), w) >= min(extra$tau))
stop("sample proportion of 0s > minimum 'tau' value. ",
"Choose larger values for 'tau'.")
@@ -4392,19 +4537,19 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
scale.init = sqrt(var(y) / 2)
} else if ( .imethod == 2) {
locat.init.y = median(y)
- scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ scale.init = sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
} else if ( .imethod == 3) {
Fit5 = vsmooth.spline(x = x[, min(ncol(x), 2)], y = y, w = w,
df = .dfmu.init)
locat.init.y = c(predict(Fit5, x = x[, min(ncol(x), 2)])$y)
- scale.init = sqrt(sum(w*abs(y-median(y))) / (sum(w) *2))
+ scale.init = sqrt(sum(c(w)*abs(y-median(y))) / (sum(w) *2))
} else {
use.this = weighted.mean(y, w)
locat.init.y = (1- .sinit)*y + .sinit * use.this
- scale.init = sqrt(sum(w*abs(y-median(y ))) / (sum(w) *2))
+ scale.init = sqrt(sum(c(w)*abs(y-median(y ))) / (sum(w) *2))
}
- locat.init.y = if (length( .ilocat))
- rep( .ilocat, length.out = n) else
+ locat.init.y = if (length( .ilocat ))
+ rep( .ilocat , length.out = n) else
rep(locat.init.y, length.out = n)
locat.init.y = matrix(locat.init.y, n, M/2)
scale.init = if (length( .iscale))
@@ -4412,132 +4557,136 @@ loglaplace2.control <- function(save.weight = TRUE, ...)
rep(scale.init, length.out = n)
scale.init = matrix(scale.init, n, M/2)
etastart =
- cbind(theta2eta(locat.init.y, .llocat, earg = .elocat),
- theta2eta(scale.init, .lscale, earg = .escale))
+ cbind(theta2eta(locat.init.y, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
}
}), list( .imethod = imethod,
- .dfmu.init = dfmu.init,
+ .dfmu.init = dfmu.init, .kappa = kappa,
.sinit = shrinkage.init, .digt = digt,
- .elocat = elocation, .escale = escale,
- .llocat = llocation, .lscale = lscale, .kappa = kappa,
- .ilocat = ilocation, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location.y = eta2theta(eta[,1:(extra$M/2), drop = FALSE],
- .llocat, earg = .elocat)
- if ( .fittedMean ) {
- kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
- Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M], .lscale, earg = .escale)
- location.y + Scale.y * (1/kappamat - kappamat)
- } else {
- dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
- location.y
- }
- }, list( .elocat = elocation, .llocat = llocation,
- .fittedMean = fittedMean, .escale = escale, .lscale = lscale,
- .kappa = kappa ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocat, scale = .lscale)
- misc$earg = list(location = .elocat, scale = .escale)
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$nsimEIM = .nsimEIM
- misc$rep0 = .rep0
+ .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .ilocat = ilocat, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ locat.y = eta2theta(eta[,1:(extra$M/2), drop = FALSE],
+ .llocat , earg = .elocat )
+ if ( .fittedMean ) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2,
+ byrow = TRUE)
+ Scale.y = eta2theta(eta[,(1+extra$M/2):extra$M],
+ .lscale , earg = .escale )
+ locat.y + Scale.y * (1/kappamat - kappamat)
+ } else {
+ dimnames(locat.y) = list(dimnames(eta)[[1]], extra$y.names)
+ locat.y
+ }
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
+ .fittedMean = fittedMean,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat , scale = .lscale )
+ misc$earg = list(location = .elocat , scale = .escale )
+
+ misc$expected = TRUE
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$nsimEIM = .nsimEIM
+ misc$rep0 = .rep0
extra$percentile = numeric(length(misc$kappa))
- location = as.matrix(location.y)
+ locat = as.matrix(locat.y)
for(ii in 1:length(misc$kappa))
extra$percentile[ii] = 100 *
- weighted.mean(y <= location.y[,ii], w)
- }), list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale,
- .fittedMean = fittedMean,
- .nsimEIM = nsimEIM, .rep0 = rep0,
- .kappa = kappa ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
- Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
- .lscale, earg = .escale)
- ymat = matrix(y, extra$n, extra$M/2)
- ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
- ell.mat = matrix(c(dloglaplace(x = c(ymat),
- location.ald = c(eta[,1:(extra$M/2)]),
- scale.ald = c(Scale.w),
- kappa = c(kappamat), log = TRUE)),
- extra$n, extra$M/2)
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
- } else {
- sum(w * ell.mat)
+ weighted.mean(y <= locat.y[, ii], w)
+ }), list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .fittedMean = fittedMean,
+ .nsimEIM = nsimEIM, .rep0 = rep0,
+ .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M/2, byrow = TRUE)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
+ .lscale , earg = .escale )
+ ymat = matrix(y, extra$n, extra$M/2)
+ ymat[ymat <= 0] = min(min(y[y > 0]), .rep0) # Adjust for 0s
+ ell.mat = matrix(c(dloglaplace(x = c(ymat),
+ locat.ald = c(eta[,1:(extra$M/2)]),
+ scale.ald = c(Scale.w),
+ kappa = c(kappamat), log = TRUE)),
+ extra$n, extra$M/2)
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ sum(c(w) * ell.mat)
+ }
+ }, list( .elocat = elocat, .llocat = llocat,
+ .escale = escale, .lscale = lscale,
+ .rep0 = rep0, .kappa = kappa ))),
+ vfamily = c("loglaplace2"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M/2)
+ Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
+ .lscale , earg = .escale )
+ locat.w = eta[,1:(extra$M/2), drop = FALSE]
+ locat.y = eta2theta(locat.w, .llocat , earg = .elocat )
+ kappamat = matrix(extra$kappa, n, M/2, byrow = TRUE)
+ w.mat = ymat
+ w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
+ w.mat= theta2eta(w.mat, .llocat , earg = .elocat ) # w.mat=log(w.mat)
+ zedd = abs(w.mat-locat.w) / Scale.w
+ dl.dlocat = sqrt(2) *
+ ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ sign(w.mat-locat.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+ dlocat.deta = dtheta.deta(locat.w, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(Scale.w, .lscale , earg = .escale )
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)
+ }), list( .escale = escale, .lscale = lscale,
+ .elocat = elocat, .llocat = llocat,
+ .rep0 = rep0, .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ run.varcov = 0
+ ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
+ dthetas.detas = cbind(dlocat.deta, dscale.deta)
+ if (length( .nsimEIM )) {
+ for(ii in 1:( .nsimEIM )) {
+ wsim = matrix(rloglap(n*M/2, loc = c(locat.w),
+ sca = c(Scale.w),
+ kappa = c(kappamat)), n, M/2)
+ zedd = abs(wsim-locat.w) / Scale.w
+ dl.dlocat = sqrt(2) *
+ ifelse(wsim >= locat.w, kappamat, 1/kappamat) *
+ sign(wsim-locat.w) / Scale.w
+ dl.dscale = sqrt(2) *
+ ifelse(wsim >= locat.w, kappamat, 1/kappamat) *
+ zedd / Scale.w - 1 / Scale.w
+
+ rm(wsim)
+ temp3 = cbind(dl.dlocat, dl.dscale) # n x M matrix
+ run.varcov = ((ii-1) * run.varcov +
+ temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
}
- }, list( .elocat = elocation, .llocat = llocation,
- .escale = escale, .lscale = lscale,
- .rep0 = rep0, .kappa = kappa ))),
- vfamily = c("loglaplace2"),
- deriv = eval(substitute(expression({
- ymat = matrix(y, n, M/2)
- Scale.w = eta2theta(eta[,(1+extra$M/2):extra$M],
- .lscale, earg = .escale)
- location.w = eta[,1:(extra$M/2), drop = FALSE]
- location.y = eta2theta(location.w, .llocat, earg = .elocat)
- kappamat = matrix(extra$kappa, n, M/2, byrow = TRUE)
- w.mat = ymat
- w.mat[w.mat <= 0] = min(min(w.mat[w.mat > 0]), .rep0) # Adjust for 0s
- w.mat= theta2eta(w.mat, .llocat, earg = .elocat) # w.mat=log(w.mat)
- zedd = abs(w.mat-location.w) / Scale.w
- dl.dlocation = sqrt(2) *
- ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
- sign(w.mat-location.w) / Scale.w
- dl.dscale = sqrt(2) *
- ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
- zedd / Scale.w - 1 / Scale.w
- dlocation.deta = dtheta.deta(location.w, .llocat, earg = .elocat)
- dscale.deta = dtheta.deta(Scale.w, .lscale, earg = .escale)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .escale = escale, .lscale = lscale,
- .elocat = elocation, .llocat = llocation,
- .rep0 = rep0, .kappa = kappa ))),
- weight = eval(substitute(expression({
- run.varcov = 0
- ind1 = iam(NA, NA, M=M, both = TRUE, diag = TRUE)
- dthetas.detas = cbind(dlocation.deta, dscale.deta)
- if (length( .nsimEIM )) {
- for(ii in 1:( .nsimEIM )) {
- wsim = matrix(rloglap(n*M/2, loc = c(location.w),
- sca = c(Scale.w),
- kappa = c(kappamat)), n, M/2)
- zedd = abs(wsim-location.w) / Scale.w
- dl.dlocation = sqrt(2) *
- ifelse(wsim >= location.w, kappamat, 1/kappamat) *
- sign(wsim-location.w) / Scale.w
- dl.dscale = sqrt(2) *
- ifelse(wsim >= location.w, kappamat, 1/kappamat) *
- zedd / Scale.w - 1 / Scale.w
-
- rm(wsim)
- temp3 = cbind(dl.dlocation, dl.dscale) # n x M matrix
- run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(run.varcov),
- n, ncol(run.varcov), byrow = TRUE) else run.varcov
+ wz = if (intercept.only)
+ matrix(colMeans(run.varcov),
+ n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
- wz = c(w) * matrix(wz, n, dimm(M))
- wz
- } else {
- d2l.dlocation2 = 2 / (Scale.w * location.w)^2
- d2l.dscale2 = 1 / Scale.w^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2,
- d2l.dscale2 * dscale.deta^2)
- c(w) * wz
- }
- }), list( .elocat = elocation, .escale = escale,
- .llocat = llocation, .lscale = lscale,
- .nsimEIM = nsimEIM) )))
+ wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz = c(w) * matrix(wz, n, dimm(M))
+ wz
+ } else {
+ d2l.dlocat2 = 2 / (Scale.w * locat.w)^2
+ d2l.dscale2 = 1 / Scale.w^2
+ wz = cbind(d2l.dlocat2 * dlocat.deta^2,
+ d2l.dscale2 * dscale.deta^2)
+ c(w) * wz
+ }
+ }), list( .elocat = elocat, .escale = escale,
+ .llocat = llocat, .lscale = lscale,
+ .nsimEIM = nsimEIM) )))
}
@@ -4551,7 +4700,7 @@ logitlaplace1.control <- function(maxit = 300, ...)
}
-adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
+adjust01.logitlaplace1 <- function(ymat, y, w, rep01) {
rangey01 = range(y[(y > 0) & (y < 1)])
ymat[ymat <= 0] = min(rangey01[1] / 2, rep01 / w[y <= 0])
ymat[ymat >= 1] = max((1 + rangey01[2]) / 2, 1 - rep01 / w[y >= 1])
@@ -4562,9 +4711,8 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
- logitlaplace1 = function(tau = NULL,
+ logitlaplace1 <- function(tau = NULL,
llocation = "logit",
- elocation = list(),
ilocation = NULL,
kappa = sqrt(tau/(1-tau)),
Scale.arg = 1,
@@ -4573,193 +4721,236 @@ adjust01.logitlaplace1 = function(ymat, y, w, rep01) {
rep01 = 0.5,
imethod = 1, zero = NULL) {
- if (!is.Numeric(rep01, positive = TRUE, allowable.length = 1) ||
- rep01 > 0.5)
- stop("bad input for argument 'rep01'")
- if (!is.Numeric(kappa, positive = TRUE))
- stop("bad input for argument 'kappa'")
+ if (!is.Numeric(rep01, positive = TRUE, allowable.length = 1) ||
+ rep01 > 0.5)
+ stop("bad input for argument 'rep01'")
+ if (!is.Numeric(kappa, positive = TRUE))
+ stop("bad input for argument 'kappa'")
- if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
- stop("arguments 'kappa' and 'tau' do not match")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1, 2 or ... 4")
-
- if (!is.list(elocation)) elocation = list()
- if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
- shrinkage.init < 0 ||
- shrinkage.init > 1)
- stop("bad input for argument 'shrinkage.init'")
- if (length(zero) &&
- !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
- is.character(zero )))
- stop("bad input for argument 'zero'")
-
- if (!is.Numeric(Scale.arg, positive = TRUE))
- stop("bad input for argument 'Scale.arg'")
- if (!is.logical(parallelLocation) ||
- length(parallelLocation) != 1)
- stop("bad input for argument 'parallelLocation'")
- fittedMean = FALSE
- if (!is.logical(fittedMean) ||
- length(fittedMean) != 1)
- stop("bad input for argument 'fittedMean'")
-
-
- mystring0 = namesof("location", llocation, earg = elocation)
- mychars = substring(mystring0, first = 1:nchar(mystring0),
- last = 1:nchar(mystring0))
- mychars[nchar(mystring0)] = ", inverse = TRUE)"
- mystring1 = paste(mychars, collapse = "")
-
-
- new("vglmff",
- blurb = c("One-parameter ", llocation, "-Laplace distribution\n\n",
- "Links: ", mystring0, "\n", "\n",
- "Quantiles: ", mystring1),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
- constraints, intercept = FALSE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallelLocation = parallelLocation,
- .Scale.arg = Scale.arg, .zero = zero ))),
- initialize = eval(substitute(expression({
- extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
- extra$Scale = rep( .Scale.arg, length = M)
- extra$kappa = rep( .kappa, length = M)
- extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$n = n
- extra$y.names = y.names =
- paste("tau = ", round(extra$tau, digits = .digt), sep = "")
- extra$individual = FALSE
- predictors.names =
- namesof(paste("quantile(", y.names, ")", sep = ""),
- .llocat, earg = .elocat, tag = FALSE)
+ if (length(tau) && max(abs(kappa - sqrt(tau/(1-tau)))) > 1.0e-6)
+ stop("arguments 'kappa' and 'tau' do not match")
- if (all(y == 0 | y == 1))
- stop("response cannot be all 0s or 1s")
- if (min(y) < 0)
- stop("negative response values detected")
- if (max(y) > 1)
- stop("response values greater than 1 detected")
- if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
- stop("sample proportion of 0s == ", round(prop.0., digits = 4),
- " > minimum 'tau' value. Choose larger values for 'tau'.")
- if ((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
- stop("sample proportion of 1s == ", round(prop.1., digits = 4),
- " < maximum 'tau' value. Choose smaller values for 'tau'.")
- if (!length(etastart)) {
- if ( .imethod == 1) {
- locat.init = quantile(rep(y, w), probs= extra$tau)
- } else if ( .imethod == 2) {
- locat.init = weighted.mean(y, w)
- locat.init = median(rep(y, w))
- } else if ( .imethod == 3) {
- use.this = weighted.mean(y, w)
- locat.init = (1- .sinit)*y + use.this * .sinit
- } else {
- stop("this option not implemented")
- }
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
- locat.init = if (length( .ilocat))
- rep( .ilocat, length.out = M) else
- rep(locat.init, length.out = M)
- locat.init = matrix(locat.init, n, M, byrow = TRUE)
- locat.init = abs(locat.init)
- etastart =
- cbind(theta2eta(locat.init, .llocat, earg = .elocat))
- }
- }), list( .imethod = imethod,
- .dfmu.init = dfmu.init,
- .sinit = shrinkage.init, .digt = digt,
- .elocat = elocation, .Scale.arg = Scale.arg,
- .llocat = llocation, .kappa = kappa,
- .ilocat = ilocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location.y = eta2theta(eta, .llocat, earg = .elocat)
- if ( .fittedMean ) {
- stop("Yet to do: handle 'fittedMean = TRUE'")
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location.y + Scale * (1/kappamat - kappamat)
- } else {
- if (length(location.y) > extra$n)
- dimnames(location.y) = list(dimnames(eta)[[1]], extra$y.names)
- location.y
- }
- }, list( .elocat = elocation, .llocat = llocation,
- .fittedMean = fittedMean, .Scale.arg = Scale.arg,
- .kappa = kappa ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocat)
- misc$earg = list(location = .elocat)
- misc$expected = TRUE
- extra$kappa = misc$kappa = .kappa
- extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
- extra$Scale.arg = .Scale.arg
- misc$true.mu = .fittedMean # @fitted is not a true mu?
- misc$rep01 = .rep01
- extra$percentile = numeric(length(misc$kappa))
- location.y = eta2theta(eta, .llocat, earg = .elocat)
- location.y = as.matrix(location.y)
- for(ii in 1:length(misc$kappa))
- extra$percentile[ii] = 100 *
- weighted.mean(y <= location.y[,ii], w)
+ llocat.identity <- as.list(substitute("identity"))
+ elocat.identity <- link2list(llocat.identity)
+ llocat.identity <- attr(elocat.identity, "function.name")
- }), list( .elocat = elocation, .llocat = llocation,
- .Scale.arg = Scale.arg, .fittedMean = fittedMean,
- .rep01 = rep01,
- .kappa = kappa ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- ymat = matrix(y, extra$n, extra$M)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
- rep01 = .rep01)
- w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
- if (residuals) {
- stop("loglikelihood residuals not implemented yet")
+
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1, 2 or ... 4")
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+ if (length(zero) &&
+ !(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
+ is.character(zero )))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(Scale.arg, positive = TRUE))
+ stop("bad input for argument 'Scale.arg'")
+ if (!is.logical(parallelLocation) ||
+ length(parallelLocation) != 1)
+ stop("bad input for argument 'parallelLocation'")
+ fittedMean = FALSE
+ if (!is.logical(fittedMean) ||
+ length(fittedMean) != 1)
+ stop("bad input for argument 'fittedMean'")
+
+
+ mystring0 = namesof("location", llocat, earg = elocat)
+ mychars = substring(mystring0, first = 1:nchar(mystring0),
+ last = 1:nchar(mystring0))
+ mychars[nchar(mystring0)] = ", inverse = TRUE)"
+ mystring1 = paste(mychars, collapse = "")
+
+
+
+
+ new("vglmff",
+ blurb = c("One-parameter ", llocat, "-Laplace distribution\n\n",
+ "Links: ", mystring0, "\n", "\n",
+ "Quantiles: ", mystring1),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallelLocation,
+ constraints, intercept = FALSE)
+ constraints = cm.zero.vgam(constraints, x, .zero, M)
+ }), list( .parallelLocation = parallelLocation,
+ .Scale.arg = Scale.arg, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ extra$M = M = max(length( .Scale.arg ), length( .kappa )) # Recycle
+ extra$Scale = rep( .Scale.arg, length = M)
+ extra$kappa = rep( .kappa, length = M)
+ extra$tau = extra$kappa^2 / (1 + extra$kappa^2)
+
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
+
+ extra$n = n
+ extra$y.names = y.names =
+ paste("tau = ", round(extra$tau, digits = .digt), sep = "")
+ extra$individual = FALSE
+
+ predictors.names <-
+ namesof(paste("quantile(", y.names, ")", sep = ""),
+ .llocat , earg = .elocat, tag = FALSE)
+
+ if (all(y == 0 | y == 1))
+ stop("response cannot be all 0s or 1s")
+ if (min(y) < 0)
+ stop("negative response values detected")
+ if (max(y) > 1)
+ stop("response values greater than 1 detected")
+ if ((prop.0. <- weighted.mean(1*(y == 0), w)) >= min(extra$tau))
+ stop("sample proportion of 0s == ", round(prop.0., digits = 4),
+ " > minimum 'tau' value. Choose larger values for 'tau'.")
+ if ((prop.1. <- weighted.mean(1*(y == 1), w)) >= max(extra$tau))
+ stop("sample proportion of 1s == ", round(prop.1., digits = 4),
+ " < maximum 'tau' value. Choose smaller values for 'tau'.")
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init = quantile(rep(y, w), probs= extra$tau)
+ } else if ( .imethod == 2) {
+ locat.init = weighted.mean(y, w)
+ locat.init = median(rep(y, w))
+ } else if ( .imethod == 3) {
+ use.this = weighted.mean(y, w)
+ locat.init = (1- .sinit)*y + use.this * .sinit
} else {
- ALDans =
- sum(w * dalap(x = c(w.mat), location = c(eta),
- scale = c(Scale.w), kappa = c(kappamat),
- log = TRUE))
- ALDans
+ stop("this option not implemented")
}
- }, list( .elocat = elocation, .llocat = llocation,
- .rep01 = rep01,
- .Scale.arg = Scale.arg, .kappa = kappa ))),
- vfamily = c("logitlaplace1"),
- deriv = eval(substitute(expression({
- ymat = matrix(y, n, M)
- Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
- location.w = eta
- kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
- ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
- rep01 = .rep01)
- w.mat = theta2eta(ymat, .llocat, earg = .elocat) # e.g., logit()
- zedd = abs(w.mat-location.w) / Scale.w
- dl.dlocation = ifelse(w.mat >= location.w, kappamat, 1/kappamat) *
- sqrt(2) * sign(w.mat-location.w) / Scale.w
- dlocation.deta = dtheta.deta(location.w, "identity", earg = .elocat)
- c(w) * cbind(dl.dlocation * dlocation.deta)
- }), list( .Scale.arg = Scale.arg, .elocat = elocation,
- .rep01 = rep01,
- .llocat = llocation, .kappa = kappa ))),
- weight = eval(substitute(expression({
- d2l.dlocation2 = 2 / Scale.w^2
- wz = cbind(d2l.dlocation2 * dlocation.deta^2)
- c(w) * wz
- }), list( .Scale.arg = Scale.arg,
- .elocat = elocation, .llocat = llocation ))))
+
+
+ locat.init = if (length( .ilocat ))
+ rep( .ilocat , length.out = M) else
+ rep(locat.init, length.out = M)
+ locat.init = matrix(locat.init, n, M, byrow = TRUE)
+ locat.init = abs(locat.init)
+ etastart =
+ cbind(theta2eta(locat.init, .llocat , earg = .elocat ))
+ }
+ }), list( .imethod = imethod,
+ .dfmu.init = dfmu.init,
+ .sinit = shrinkage.init, .digt = digt,
+ .elocat = elocat, .Scale.arg = Scale.arg,
+ .llocat = llocat, .kappa = kappa,
+ .ilocat = ilocat ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ locat.y = eta2theta(eta, .llocat , earg = .elocat )
+ if ( .fittedMean ) {
+ stop("Yet to do: handle 'fittedMean = TRUE'")
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.y + Scale * (1/kappamat - kappamat)
+ } else {
+ if (length(locat.y) > extra$n)
+ dimnames(locat.y) = list(dimnames(eta)[[1]], extra$y.names)
+ locat.y
+ }
+ }, list( .elocat = elocat, .llocat = llocat,
+ .fittedMean = fittedMean, .Scale.arg = Scale.arg,
+ .kappa = kappa ))),
+ last = eval(substitute(expression({
+ misc$link = c(location = .llocat )
+ misc$earg = list(location = .elocat )
+
+ misc$expected = TRUE
+
+ extra$kappa = misc$kappa = .kappa
+ extra$tau = misc$tau = misc$kappa^2 / (1 + misc$kappa^2)
+ extra$Scale.arg = .Scale.arg
+
+ misc$true.mu = .fittedMean # @fitted is not a true mu?
+ misc$rep01 = .rep01
+
+ extra$percentile = numeric(length(misc$kappa))
+ locat.y = eta2theta(eta, .llocat , earg = .elocat )
+ locat.y = as.matrix(locat.y)
+ for(ii in 1:length(misc$kappa))
+ extra$percentile[ii] = 100 *
+ weighted.mean(y <= locat.y[, ii], w)
+
+ }), list( .elocat = elocat, .llocat = llocat,
+ .Scale.arg = Scale.arg, .fittedMean = fittedMean,
+ .rep01 = rep01,
+ .kappa = kappa ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kappamat = matrix(extra$kappa, extra$n, extra$M, byrow = TRUE)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ ymat = matrix(y, extra$n, extra$M)
+ ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ rep01 = .rep01)
+ w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ if (residuals) {
+ stop("loglikelihood residuals not implemented yet")
+ } else {
+ ALDans =
+ sum(c(w) * dalap(x = c(w.mat), location = c(eta),
+ scale = c(Scale.w), kappa = c(kappamat),
+ log = TRUE))
+ ALDans
+ }
+ }, list( .elocat = elocat, .llocat = llocat,
+ .rep01 = rep01,
+ .Scale.arg = Scale.arg, .kappa = kappa ))),
+ vfamily = c("logitlaplace1"),
+ deriv = eval(substitute(expression({
+ ymat = matrix(y, n, M)
+ Scale.w = matrix(extra$Scale, extra$n, extra$M, byrow = TRUE)
+ locat.w = eta
+ kappamat = matrix(extra$kappa, n, M, byrow = TRUE)
+ ymat = adjust01.logitlaplace1(ymat = ymat, y = y, w = w,
+ rep01 = .rep01)
+ w.mat = theta2eta(ymat, .llocat , earg = .elocat ) # e.g., logit()
+ zedd = abs(w.mat-locat.w) / Scale.w
+ dl.dlocat = ifelse(w.mat >= locat.w, kappamat, 1/kappamat) *
+ sqrt(2) * sign(w.mat-locat.w) / Scale.w
+
+
+ dlocat.deta = dtheta.deta(locat.w,
+ "identity",
+ earg = .elocat.identity )
+
+
+ c(w) * cbind(dl.dlocat * dlocat.deta)
+ }), list( .Scale.arg = Scale.arg, .rep01 = rep01,
+ .elocat = elocat,
+ .llocat = llocat,
+
+ .elocat.identity = elocat.identity,
+ .llocat.identity = llocat.identity,
+
+ .kappa = kappa ))),
+ weight = eval(substitute(expression({
+ d2l.dlocat2 = 2 / Scale.w^2
+ wz = cbind(d2l.dlocat2 * dlocat.deta^2)
+ c(w) * wz
+ }), list( .Scale.arg = Scale.arg,
+ .elocat = elocat, .llocat = llocat ))))
}
diff --git a/R/family.quantal.R b/R/family.quantal.R
index 14c7821..c132ea8 100644
--- a/R/family.quantal.R
+++ b/R/family.quantal.R
@@ -14,12 +14,12 @@
- abbott = function(link0 = "logit", earg0 = list(),
- link1 = "logit", earg1 = list(),
- iprob0 = NULL, iprob1 = NULL,
- fitted.type = c("observed", "treatment", "control"),
- mux.offdiagonal = 0.98,
- zero = 1) {
+ abbott <- function(link0 = "logit",
+ link1 = "logit",
+ iprob0 = NULL, iprob1 = NULL,
+ fitted.type = c("observed", "treatment", "control"),
+ mux.offdiagonal = 0.98,
+ zero = 1) {
fitted.type <- match.arg(fitted.type,
@@ -27,13 +27,16 @@
several.ok = TRUE)
- if (mode(link0) != "character" && mode(link0) != "name")
- link0 <- as.character(substitute(link0))
- if (!is.list(earg0)) earg0 = list()
+ link0 <- as.list(substitute(link0))
+ earg0 <- link2list(link0)
+ link0 <- attr(earg0, "function.name")
+
+ link1 <- as.list(substitute(link1))
+ earg1 <- link2list(link1)
+ link1 <- attr(earg1, "function.name")
+
+
- if (mode(link1) != "character" && mode(link1) != "name")
- link1 <- as.character(substitute(link1))
- if (!is.list(earg1)) earg1 = list()
if (!is.Numeric(mux.offdiagonal, allowable.length = 1) ||
mux.offdiagonal >= 1 ||
@@ -56,7 +59,7 @@
))),
initialize = eval(substitute(expression({
- eval(binomialff(link = .link0)@initialize) # w, y, mustart are assigned
+ eval(binomialff(link = .link0 )@initialize) # w, y, mustart are assigned
predictors.names <-
@@ -214,9 +217,8 @@
if (FALSE)
- Abbott = function(lprob1 = "elogit",
- eprob1 = list(min = 0, max = 1), # For now, that is
- lprob0 = "logit", eprob0 = list(),
+ Abbott <- function(lprob1 = elogit(min = 0, max = 1), # For now, that is
+ lprob0 = "logit",
iprob0 = NULL, iprob1 = NULL,
nointercept = 2, # NULL,
zero = 1) {
@@ -227,14 +229,14 @@ if (FALSE)
stop("does not work")
+ lprob1 <- as.list(substitute(lprob1))
+ eprob1 <- link2list(lprob1)
+ lprob1 <- attr(eprob1, "function.name")
- if (mode(lprob1) != "character" && mode(lprob1) != "name")
- lprob1 <- as.character(substitute(lprob1))
- if (!is.list(eprob1)) eprob1 = list()
+ lprob0 <- as.list(substitute(lprob0))
+ eprob0 <- link2list(lprob0)
+ lprob0 <- attr(eprob0, "function.name")
- if (mode(lprob0) != "character" && mode(lprob0) != "name")
- lprob0 <- as.character(substitute(lprob0))
- if (!is.list(eprob0)) eprob0 = list()
new("vglmff",
diff --git a/R/family.rcam.R b/R/family.rcim.R
similarity index 77%
rename from R/family.rcam.R
rename to R/family.rcim.R
index e2cc675..0de4d48 100644
--- a/R/family.rcam.R
+++ b/R/family.rcim.R
@@ -16,7 +16,7 @@
- rcam <- function(y,
+ rcim <- function(y,
family = poissonff,
Rank = 0,
Musual = NULL,
@@ -88,11 +88,11 @@
eifun <- function(i, n) diag(n)[, i, drop = FALSE]
- .rcam.df <-
+ .rcim.df <-
if (!noroweffects) data.frame("Row.2" = eifun(2, nrow(y))) else
if (!nocoleffects) data.frame("Col.2" = eifun(2, nrow(y))) else
stop("at least one of 'noroweffects' and 'nocoleffects' must be FALSE")
- colnames( .rcam.df ) <- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
+ colnames( .rcim.df ) <- paste(rprefix, "2", sep = "") # Overwrite "Row.2"
@@ -132,7 +132,7 @@
Hlist[[ paste(rprefix, ii, sep = "")]] <- matrix(1, ncol(y), 1)
- .rcam.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
+ .rcim.df[[paste(rprefix, ii, sep = "")]] <- modmat.row[, ii]
}
@@ -141,21 +141,21 @@
Hlist[[ paste(cprefix, ii, sep = "")]] <- modmat.col[, ii, drop = FALSE]
- .rcam.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
+ .rcim.df[[paste(cprefix, ii, sep = "")]] <- rep(1, nrow(y))
}
if (Rank > 0) {
for(ii in 2:nrow(y)) {
Hlist[[yn1[ii]]] <- diag(ncol(y))
- .rcam.df[[yn1[ii]]] <- eifun(ii, nrow(y))
+ .rcim.df[[yn1[ii]]] <- eifun(ii, nrow(y))
}
}
- dimnames(.rcam.df) <- list(if (length(dimnames(y)[[1]]))
+ dimnames(.rcim.df) <- list(if (length(dimnames(y)[[1]]))
dimnames(y)[[1]] else
as.character(1:nrow(y)),
- dimnames(.rcam.df)[[2]])
+ dimnames(.rcim.df)[[2]])
str1 <- paste("~ ", rprefix, "2", sep = "")
@@ -201,7 +201,7 @@
if (Rank > 0)
mycontrol$Norrr <- as.formula(str1) # Overwrite this
- assign(".rcam.df", .rcam.df, envir = VGAM::VGAMenv)
+ assign(".rcim.df", .rcim.df, envir = VGAM::VGAMenv)
warn.save <- options()$warn
options(warn = -3) # Suppress the warnings (hopefully, temporarily)
@@ -250,7 +250,7 @@
weights = if (length(weights))
weights else rep(1, length = nrow(y)),
...,
- control = mycontrol, data = .rcam.df)
+ control = mycontrol, data = .rcim.df)
} else {
if (is(object.save, "vglm")) object.save else
vglm(as.formula(str2),
@@ -260,7 +260,7 @@
weights = if (length(weights))
weights else rep(1, length = nrow(y)),
...,
- control = mycontrol, data = .rcam.df)
+ control = mycontrol, data = .rcim.df)
}
options(warn = warn.save)
@@ -273,7 +273,7 @@
summary(answer)
}
} else {
- as(answer, ifelse(Rank > 0, "rcam", "rcam0"))
+ as(answer, ifelse(Rank > 0, "rcim", "rcim0"))
}
@@ -292,8 +292,8 @@
-summaryrcam = function(object, ...) {
- rcam(object, summary.arg = TRUE, ...)
+summaryrcim = function(object, ...) {
+ rcim(object, summary.arg = TRUE, ...)
}
@@ -304,21 +304,21 @@ summaryrcam = function(object, ...) {
- setClass("rcam0", representation(not.needed = "numeric"),
+ setClass("rcim0", representation(not.needed = "numeric"),
contains = "vglm") # Added 20110506
- setClass("rcam", representation(not.needed = "numeric"),
+ setClass("rcim", representation(not.needed = "numeric"),
contains = "rrvglm")
-setMethod("summary", "rcam0",
+setMethod("summary", "rcim0",
function(object, ...)
- summaryrcam(object, ...))
+ summaryrcim(object, ...))
-setMethod("summary", "rcam",
+setMethod("summary", "rcim",
function(object, ...)
- summaryrcam(object, ...))
+ summaryrcim(object, ...))
@@ -329,7 +329,7 @@ setMethod("summary", "rcam",
- Rcam <- function (mat, rbaseline = 1, cbaseline = 1) {
+ Rcim <- function (mat, rbaseline = 1, cbaseline = 1) {
mat <- as.matrix(mat)
RRR <- dim(mat)[1]
@@ -389,7 +389,7 @@ setMethod("summary", "rcam",
- plotrcam0 <- function (object,
+ plotrcim0 <- function (object,
centered = TRUE, whichplots = c(1, 2),
hline0 = TRUE, hlty = "dashed", hcol = par()$col, hlwd = par()$lwd,
rfirst = 1, cfirst = 1,
@@ -504,14 +504,14 @@ setMethod("summary", "rcam",
-setMethod("plot", "rcam0",
+setMethod("plot", "rcim0",
function(x, y, ...)
- plotrcam0(object = x, ...))
+ plotrcim0(object = x, ...))
-setMethod("plot", "rcam",
+setMethod("plot", "rcim",
function(x, y, ...)
- plotrcam0(object = x, ...))
+ plotrcim0(object = x, ...))
@@ -667,10 +667,10 @@ confint_nb1 <- function(nb1, level = 0.95) {
stop("argument 'nb1' does not appear to be a negbinomial() fit")
if (!all(unlist(constraints(nb1)[-1]) == 1))
- stop("argument 'nb1' does not appear to have parallel = TRUE")
+ stop("argument 'nb1' does not appear to have 'parallel = TRUE'")
if (!all(unlist(constraints(nb1)[1]) == c(diag(nb1 at misc$M))))
- stop("argument 'nb1' does not have parallel = FALSE ",
+ stop("argument 'nb1' does not have 'parallel = FALSE' ",
"for the intercept")
if (nb1 at misc$M != 2)
@@ -733,7 +733,7 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
big.ci.a21 <- a21.hat + c(-1, 1) * se.eachway * se.a21.hat
seq.a21 <- seq(big.ci.a21[1], big.ci.a21[2], length = nseq.a21)
- Hlist.orig <- constraints.vlm(rrvglm2, type = "lm")
+ Hlist.orig <- constraints.vlm(rrvglm2, type = "term")
alreadyComputed <- !is.null(rrvglm2 at post$a21.matrix)
@@ -809,221 +809,154 @@ plota21 <- function(rrvglm2, plot.it = TRUE, nseq.a21 = 31,
-if (FALSE)
-Qvar <- function(object, factor.name = NULL,
- level1.name = "level1",
- ...) {
-
-
-
-
- object.xlevels = if (is(object, "vglm")) {
- object at xlevels
- } else {
- factor(rownames(object))
- }
-
- myvcov = if (is(object, "vglm")) {
- if (length(object.xlevels) == 0)
- stop("no factors amongst the model.matrix of 'object'")
-
- if (is.null(factor.name)) {
- if (length(object.xlevels) > 1)
- stop("more than one factor in the model.matrix of 'object'")
-
- factor.name = names(object.xlevels)
- object.xlevels = object at xlevels[[1]]
- } else {
- object.xlevels = object.xlevels[[factor.name]]
- }
-
-
-
-
-
- colptr = attr(model.matrix(object), "vassign")
- colptr = colptr[[factor.name]]
- vcov(object)[colptr, colptr, drop = FALSE]
- } else if (is.matrix(object)) {
- object
- } else {
- stop("argument 'object' is not a vglm() object or a matrix")
- }
-
-
-
-
- myvcov = rbind(0, cbind(0, myvcov))
-
- LL = nrow(myvcov)
- if (LL <= 3)
- stop("the factor must have at least three levels")
-
-
- vcov0 = myvcov
- for (ilocal in 1:LL)
- for (jlocal in ilocal:LL)
- myvcov[ilocal, jlocal] =
- myvcov[jlocal, ilocal] = vcov0[ilocal, ilocal] +
- vcov0[jlocal, jlocal] -
- vcov0[ilocal, jlocal] * 2
-
- allvcov = myvcov
- rownames(allvcov) =
- c(paste(if (is.matrix(object)) level1.name else factor.name,
- if (is.matrix(object)) NULL else object.xlevels[1],
- sep = ""),
- rownames(vcov0)[-1])
- colnames(allvcov) = rownames(allvcov)
-
-
-
-
-
-
-
-
-
- diag(allvcov) = rep(1, len = LL) # Any positive value should do
-
-
-
-
-
- Allvcov = allvcov
-
-
-
-
- wmat = matrix(1, LL, LL)
- diag(wmat) = sqrt( .Machine$double.eps )
-
-
- logAllvcov = log(Allvcov)
- attr(logAllvcov, "Prior.Weights") = wmat
- logAllvcov
-}
+Qvar <- function(object, factorname = NULL, coef.indices = NULL,
+ labels = NULL, dispersion = NULL,
+ reference.name = "(reference)",
+ estimates = NULL
+ ) {
+ coef.indices.saved <- coef.indices
+ if (!is.matrix(object)) {
+ model <- object
+ if (is.null(factorname) && is.null(coef.indices)) {
+ stop("arguments \"factorname\" and \"coef.indices\" are ",
+ "both NULL")
+ }
+ if (is.null(coef.indices)) {
+ tmodel <- terms(model)
+ modelmat <- if (is.matrix(model at x)) model at x else
+ model.matrix(tmodel,
+ data = model at model)
-Qvar <- function(object, factorname = NULL, coef.indices = NULL,
- labels = NULL, dispersion = NULL,
- reference.name = "(reference)",
- estimates = NULL
- ) {
+ colptr = attr(model.matrix(object, type = "vlm"), "vassign")
- coef.indices.saved <- coef.indices
- if (!is.matrix(object)) {
- model <- object
- if (is.null(factorname) && is.null(coef.indices)) {
- stop("arguments \"factorname\" and \"coef.indices\" are ",
- "both NULL")
+ M <- npred(model)
+ newfactorname = if (M > 1) {
+ clist = constraints(model, type = "term")
+ Mdot = ncol(clist[[factorname]])
+ vlabel(factorname, ncolBlist = Mdot, M = M)
+ } else {
+ factorname
}
- if (is.null(coef.indices)) {
- tmodel <- terms(model)
- modelmat <- if (is.matrix(model at x)) model at x else
- model.matrix(tmodel,
- data = model at model)
-
+ colptr = if (M > 1) {
+ colptr[newfactorname]
+ } else {
+ colptr[[newfactorname]]
+ }
+ coef.indices <- colptr
+ contmat <- if (length(model at xlevels[[factorname]]) ==
+ length(coef.indices)) {
+ diag(length(coef.indices))
+ } else {
+ eval(call(model at contrasts[[factorname]],
+ model at xlevels[[factorname]]))
+ }
+ rownames(contmat) <- model at xlevels[[factorname]]
+
+ if (is.null(estimates)) {
+ if (M > 1) {
+ estimates <- matrix(-1, nrow(contmat), Mdot)
+ for (ii in 1:Mdot)
+ estimates[, ii] <- contmat %*% (coefvlm(model)[(coef.indices[[ii]])])
+ } else {
+ estimates <- contmat %*% (coefvlm(model)[coef.indices])
+ }
+ }
- colptr = attr(model.matrix(object), "vassign")
- colptr = colptr[[factorname]]
- coef.indices <- colptr
+ Covmat <- vcovvlm(model, dispersion = dispersion)
+ covmat <- Covmat[unlist(coef.indices),
+ unlist(coef.indices), drop = FALSE]
+ covmat <- if (M > 1) {
- contmat <- if (length(model at xlevels[[factorname]]) ==
- length(coef.indices)) {
- diag(length(coef.indices))
- } else {
- eval(call(model at contrasts[[factorname]],
- model at xlevels[[factorname]]))
- }
- rownames(contmat) <- model at xlevels[[factorname]]
+ for (ii in 1:Mdot) {
+ ans <- contmat %*% Covmat[colptr[[ii]], (colptr[[ii]])] %*% t(contmat)
+ }
+ ans
- if (is.null(estimates))
- estimates <- contmat %*% coefvlm(model)[coef.indices]
- covmat <- vcovvlm(model, dispersion = dispersion)
- covmat <- covmat[coef.indices, coef.indices, drop = FALSE]
- covmat <- contmat %*% covmat %*% t(contmat)
} else {
- k <- length(coef.indices)
- refPos <- numeric(0)
- if (0 %in% coef.indices) {
- refPos <- which(coef.indices == 0)
- coef.indices <- coef.indices[-refPos]
- }
- covmat <- vcovvlm(model, dispersion = dispersion)
- covmat <- covmat[coef.indices, coef.indices, drop = FALSE]
-
- if (is.null(estimates))
- estimates <- coefvlm(model)[coef.indices]
-
- if (length(refPos) == 1) {
- if (length(estimates) != k)
- estimates <- c(0, estimates)
- covmat <- rbind(0, cbind(0, covmat))
- names(estimates)[1] <-
- rownames(covmat)[1] <-
- colnames(covmat)[1] <- reference.name
- if (refPos != 1) {
- perm <- if (refPos == k) c(2:k, 1) else
- c(2:refPos, 1, (refPos + 1):k)
- estimates <- estimates[perm]
- covmat <- covmat[perm, perm, drop = FALSE]
- }
- }
+ contmat %*% covmat %*% t(contmat)
+ }
+ } else { # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
+ kk <- length(coef.indices)
+ refPos <- numeric(0)
+ if (0 %in% coef.indices) {
+ refPos <- which(coef.indices == 0)
+ coef.indices <- coef.indices[-refPos]
}
+ covmat <- vcovvlm(model, dispersion = dispersion)
+ covmat <- covmat[coef.indices, coef.indices, drop = FALSE]
+
+ if (is.null(estimates))
+ estimates <- coefvlm(model)[coef.indices]
+
+ if (length(refPos) == 1) {
+ if (length(estimates) != kk)
+ estimates <- c(0, estimates)
+ covmat <- rbind(0, cbind(0, covmat))
+ names(estimates)[1] <-
+ rownames(covmat)[1] <-
+ colnames(covmat)[1] <- reference.name
+ if (refPos != 1) {
+ perm <- if (refPos == kk) c(2:kk, 1) else
+ c(2:refPos, 1, (refPos + 1):kk)
+ estimates <- estimates[perm]
+ covmat <- covmat[perm, perm, drop = FALSE]
+ }
+ }
+ } # ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
- return(Recall(covmat,
- factorname = factorname,
- coef.indices = coef.indices.saved,
- labels = labels,
- dispersion = dispersion,
- estimates = estimates
- )
- )
- } else {
+ return(Recall(covmat,
+ factorname = factorname,
+ coef.indices = coef.indices.saved,
+ labels = labels,
+ dispersion = dispersion,
+ estimates = estimates
+ )
+ )
+ } else { # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
- covmat <- object
- if (length(labels))
- rownames(covmat) <- colnames(covmat) <- labels
- if ((LL <- dim(covmat)[1]) <= 2)
- stop("This function works only for factors with 3 ",
- "or more levels")
- }
+ covmat <- object
+ if (length(labels))
+ rownames(covmat) <- colnames(covmat) <- labels
+ if ((LLL <- dim(covmat)[1]) <= 2)
+ stop("This function works only for factors with 3 ",
+ "or more levels")
+ } # ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
allvcov = covmat
- for (ilocal in 1:LL)
- for (jlocal in ilocal:LL)
+ for (ilocal in 1:LLL)
+ for (jlocal in ilocal:LLL)
allvcov[ilocal, jlocal] =
allvcov[jlocal, ilocal] = covmat[ilocal, ilocal] +
covmat[jlocal, jlocal] -
covmat[ilocal, jlocal] * 2
- diag(allvcov) = rep(1.0, len = LL) # Any positive value should do
+ diag(allvcov) = rep(1.0, len = LLL) # Any positive value should do
- wmat = matrix(1.0, LL, LL)
+ wmat = matrix(1.0, LLL, LLL)
diag(wmat) = sqrt( .Machine$double.eps )
logAllvcov = log(allvcov)
@@ -1044,28 +977,29 @@ Qvar <- function(object, factorname = NULL, coef.indices = NULL,
WorstErrors <- function(qv.object) {
stop("20110729; does not work")
- reducedForm <- function(covmat, qvmat){
- nlevels <- dim(covmat)[1]
- firstRow <- covmat[1, ]
- ones <- rep(1, nlevels)
- J <- outer(ones, ones)
- notzero <- 2:nlevels
- r.covmat <- covmat + (firstRow[1]*J) -
- outer(firstRow, ones) -
- outer(ones, firstRow)
- r.covmat <- r.covmat[notzero, notzero]
- qv1 <- qvmat[1, 1]
- r.qvmat <- (qvmat + qv1*J)[notzero, notzero]
- list(r.covmat, r.qvmat)}
- covmat <- qv.object$covmat
- qvmat <- diag(qv.object$qvframe$quasiVar)
- r.form <- reducedForm(covmat, qvmat)
- r.covmat <- r.form[[1]]
- r.qvmat <- r.form[[2]]
- inverse.sqrt <- solve(chol(r.covmat))
- evalues <- eigen(t(inverse.sqrt) %*% r.qvmat %*% inverse.sqrt,
- symmetric = TRUE)$values
- sqrt(c(min(evalues), max(evalues))) - 1
+ reducedForm <- function(covmat, qvmat) {
+ nlevels <- dim(covmat)[1]
+ firstRow <- covmat[1, ]
+ ones <- rep(1, nlevels)
+ J <- outer(ones, ones)
+ notzero <- 2:nlevels
+ r.covmat <- covmat + (firstRow[1]*J) -
+ outer(firstRow, ones) -
+ outer(ones, firstRow)
+ r.covmat <- r.covmat[notzero, notzero]
+ qv1 <- qvmat[1, 1]
+ r.qvmat <- (qvmat + qv1*J)[notzero, notzero]
+ list(r.covmat, r.qvmat)
+ }
+ covmat <- qv.object$covmat
+ qvmat <- diag(qv.object$qvframe$quasiVar)
+ r.form <- reducedForm(covmat, qvmat)
+ r.covmat <- r.form[[1]]
+ r.qvmat <- r.form[[2]]
+ inverse.sqrt <- solve(chol(r.covmat))
+ evalues <- eigen(t(inverse.sqrt) %*% r.qvmat %*% inverse.sqrt,
+ symmetric = TRUE)$values
+ sqrt(c(min(evalues), max(evalues))) - 1
}
@@ -1074,14 +1008,14 @@ WorstErrors <- function(qv.object) {
IndentPrint <- function(object, indent = 4, ...){
stop("20110729; does not work")
- zz <- ""
- tc <- textConnection("zz", "w", local = TRUE)
- sink(tc)
- try(print(object, ...))
- sink()
- close(tc)
- indent <- paste(rep(" ", indent), sep = "", collapse = "")
- cat(paste(indent, zz, sep = ""), sep = "\n")}
+ zz <- ""
+ tc <- textConnection("zz", "w", local = TRUE)
+ sink(tc)
+ try(print(object, ...))
+ sink()
+ close(tc)
+ indent <- paste(rep(" ", indent), sep = "", collapse = "")
+ cat(paste(indent, zz, sep = ""), sep = "\n")}
@@ -1196,7 +1130,7 @@ plotqvar <- function(object,
if (!any("normal1" %in% object at family@vfamily))
stop("argument 'object' dos not appear to be a ",
- "rcam(, normal1) object")
+ "rcim(, normal1) object")
estimates = c(object at extra$attributes.y$estimates)
if (!length(names(estimates)) &&
diff --git a/R/family.rcqo.R b/R/family.rcqo.R
index f370d54..4b5dca3 100644
--- a/R/family.rcqo.R
+++ b/R/family.rcqo.R
@@ -297,66 +297,68 @@ dcqo <- function(x, p, S,
warning("12/6/06; needs a lot of work based on rcqo()")
- if (mode(family) != "character" && mode(family) != "name")
- family = as.character(substitute(family))
- family = match.arg(family, c("poisson", "binomial",
+ if (mode(family) != "character" && mode(family) != "name")
+ family = as.character(substitute(family))
+ family = match.arg(family, c("poisson", "binomial",
"negbinomial", "ordinal"))[1]
- if (!is.Numeric(p, integer.valued = TRUE,
- positive = TRUE, allowable.length = 1) ||
- p < 2)
- stop("bad input for argument 'p'")
- if (!is.Numeric(S, integer.valued = TRUE,
- positive = TRUE, allowable.length = 1))
- stop("bad input for argument 'S'")
- if (!is.Numeric(Rank, integer.valued = TRUE,
- positive = TRUE, allowable.length = 1))
- stop("bad input for argument 'Rank'")
- if (length(seed) &&
- !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'seed'")
- if (!is.logical(EqualTolerances) || length(EqualTolerances)>1)
- stop("bad input for argument 'EqualTolerances)'")
- if (EqualMaxima && loabundance != hiabundance)
- stop("'loabundance' and 'hiabundance' must ",
- "be equal when 'EqualTolerances = TRUE'")
- if (length(seed)) set.seed(seed)
- xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
- paste("x", 2:p, sep="")))
- ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
- lvmat = xmat %*% ccoefs
- optima = matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
- Tols = if (EqualTolerances) matrix(1, S, Rank) else
- matrix(rnorm(Rank*S, mean=1, sd=1), S, Rank)
- loeta = log(loabundance)
- hieta = log(hiabundance)
- logmaxima = runif(S, min=loeta, max=hieta)
- etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
- for(jay in 1:S) {
- optmat = matrix(optima[jay,], n, Rank, byrow = TRUE)
- tolmat = matrix(Tols[jay,], n, Rank, byrow = TRUE)
- temp = cbind((lvmat - optmat) * tolmat)
- for(r in 1:Rank)
- etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] *
- (lvmat[,r] - optmat[jay,r])
- }
+ if (!is.Numeric(p, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1) ||
+ p < 2)
+ stop("bad input for argument 'p'")
+ if (!is.Numeric(S, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'S'")
+ if (!is.Numeric(Rank, integer.valued = TRUE,
+ positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'Rank'")
+ if (length(seed) &&
+ !is.Numeric(seed, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'seed'")
+ if (!is.logical(EqualTolerances) || length(EqualTolerances)>1)
+ stop("bad input for argument 'EqualTolerances)'")
+ if (EqualMaxima && loabundance != hiabundance)
+ stop("'loabundance' and 'hiabundance' must ",
+ "be equal when 'EqualTolerances = TRUE'")
+ if (length(seed)) set.seed(seed)
+
+ xmat = matrix(rnorm(n*(p-1)), n, p-1, dimnames=list(as.character(1:n),
+ paste("x", 2:p, sep="")))
+ ccoefs = matrix(rnorm((p-1)*Rank), p-1, Rank)
+ lvmat = xmat %*% ccoefs
+ optima = matrix(rnorm(Rank*S, sd=sdOptima), S, Rank)
+ Tols = if (EqualTolerances) matrix(1, S, Rank) else
+ matrix(rnorm(Rank*S, mean=1, sd=1), S, Rank)
+ loeta = log(loabundance)
+ hieta = log(hiabundance)
+ logmaxima = runif(S, min=loeta, max=hieta)
+
+ etamat = matrix(logmaxima,n,S,byrow = TRUE) # eta=log(mu) only; intercept term
+ for(jay in 1:S) {
+ optmat = matrix(optima[jay,], n, Rank, byrow = TRUE)
+ tolmat = matrix(Tols[jay,], n, Rank, byrow = TRUE)
+ temp = cbind((lvmat - optmat) * tolmat)
+ for(r in 1:Rank)
+ etamat[,jay] = etamat[,jay] - 0.5 * temp[,r] *
+ (lvmat[,r] - optmat[jay,r])
+ }
- ymat = if (family == "negbinomial") {
+ ymat = if (family == "negbinomial") {
- } else {
- matrix(rpois(n*S, lambda = exp(etamat)), n, S)
- }
- if (family == "binomial")
- ymat = 0 + (ymat > 0)
+ } else {
+ matrix(rpois(n*S, lambda = exp(etamat)), n, S)
+ }
+ if (family == "binomial")
+ ymat = 0 + (ymat > 0)
- dimnames(ymat) = list(as.character(1:n), paste("y", 1:S, sep=""))
- ans = data.frame(xmat, ymat)
- attr(ans, "ccoefficients") = ccoefs
- attr(ans, "family") = family
- ans
+ dimnames(ymat) = list(as.character(1:n), paste("y", 1:S, sep=""))
+ ans = data.frame(xmat, ymat)
+ attr(ans, "ccoefficients") = ccoefs
+ attr(ans, "family") = family
+ ans
}
diff --git a/R/family.robust.R b/R/family.robust.R
index a01fbf2..34b535b 100644
--- a/R/family.robust.R
+++ b/R/family.robust.R
@@ -13,10 +13,12 @@
edhuber <- function(x, k = 0.862, mu = 0, sigma = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
+
+
zedd <- (x - mu) / sigma
fk <- dnorm(k)
eps <- 1 - 1 / (pnorm(k) - pnorm(-k) + 2 * fk /k)
@@ -87,8 +89,7 @@ rhuber <- function(n, k = 0.862, mu = 0, sigma = 1) {
-qhuber <- function (p, k = 0.862, mu = 0, sigma = 1)
-{
+qhuber <- function (p, k = 0.862, mu = 0, sigma = 1) {
if(min(sigma) <= 0)
stop("argument 'sigma' must be positive")
if(min(k) <= 0)
@@ -107,8 +108,7 @@ qhuber <- function (p, k = 0.862, mu = 0, sigma = 1)
-phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
-{
+phuber <- function(q, k = 0.862, mu = 0, sigma = 1) {
if (any(sigma <= 0))
stop("argument 'sigma' must be positive")
@@ -128,10 +128,7 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
huber <- function(llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
- k = 0.862,
- imethod = 1,
- zero = 2) {
+ k = 0.862, imethod = 1, zero = 2) {
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
@@ -143,34 +140,48 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
stop("bad input for argument 'k'")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
new("vglmff",
blurb = c("Huber least favorable distribution\n\n",
"Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), "\n\n",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), "\n\n",
"Mean: location"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero, M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
predictors.names <-
c(namesof("location", .llocat, earg = .elocat, tag = FALSE),
namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+
if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- scale.y.est <- sqrt( sum(w * junk$resid^2) / junk$df.residual )
+ junk = lm.wfit(x = x, y = y, w = c(w))
+ scale.y.est <- sqrt( sum(c(w) * junk$resid^2) / junk$df.residual )
location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
if ( .imethod == 3) {
rep(weighted.mean(y, w), len = n)
@@ -186,39 +197,42 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
theta2eta(location.init, .llocat, earg = .elocat),
theta2eta(scale.y.est, .lscale, earg = .escale))
}
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[,1], .llocat, earg = .elocat)
- }, list( .llocat = llocation,
- .elocat = elocation, .escale = escale ))),
+ eta2theta(eta[, 1], .llocat, earg = .elocat)
+ }, list( .llocat = llocat,
+ .elocat = elocat, .escale = escale ))),
last = eval(substitute(expression({
misc$link <- c("location" = .llocat, "scale" = .lscale)
+
misc$earg <- list("location" = .elocat, "scale" = .escale)
+
misc$expected <- TRUE
misc$k.huber <- .k
misc$imethod <- .imethod
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
+ misc$multipleResponses <- FALSE
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
.k = k, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ location <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
kay <- .k
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dhuber(y, k = kay, mu = location, sigma = myscale,
+ sum(c(w) * dhuber(y, k = kay, mu = location, sigma = myscale,
log = TRUE))
}
- }, list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
.k = k ))),
vfamily = c("huber"),
deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta[,1], .llocat, earg = .elocat)
- myscale <- eta2theta(eta[,2], .lscale, earg = .escale)
+ mylocat <- eta2theta(eta[, 1], .llocat, earg = .elocat)
+ myscale <- eta2theta(eta[, 2], .lscale, earg = .escale)
myk <- .k
zedd <- (y - mylocat) / myscale
@@ -242,8 +256,8 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
c(w) * cbind(dl.dlocat * dlocat.deta,
dl.dscale * dscale.deta)
ans
- }), list( .llocat = llocation, .lscale = lscale,
- .elocat = elocation, .escale = escale,
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale,
.eps = eps, .k = k ))),
weight = eval(substitute(expression({
wz <- matrix(as.numeric(NA), n, 2) # diag matrix; y is one-col too
@@ -252,13 +266,13 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
temp4 <- erf(myk / sqrt(2))
- ed2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
+ ned2l.dlocat2 <- temp4 * (1 - .eps) / myscale^2
- ed2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
+ ned2l.dscale2 <- (dnorm(myk) * (1 - myk^2) + temp4) *
2 * (1 - .eps) / (myk * myscale^2)
- wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
- wz[, iam(2,2,M)] <- ed2l.dscale2 * dscale.deta^2
+ wz[, iam(1,1,M)] <- ned2l.dlocat2 * dlocat.deta^2
+ wz[, iam(2,2,M)] <- ned2l.dscale2 * dscale.deta^2
ans
c(w) * wz
}), list( .eps = eps ))))
@@ -268,7 +282,6 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
huber1 <- function(llocation = "identity",
- elocation = list(),
k = 0.862,
imethod = 1) {
@@ -276,86 +289,100 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
A1 <- (2 * dnorm(k) / k - 2 * pnorm(-k))
eps <- A1 / (1 + A1)
- if (!is.Numeric(imethod, allowable.length = 1, integer.valued = TRUE, positive = TRUE) ||
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
imethod > 4)
- stop("argument 'imethod' must be 1 or 2 or 3 or 4")
+ stop("argument 'imethod' must be 1 or 2 or 3 or 4")
if (!is.Numeric(k, allowable.length = 1, positive = TRUE))
- stop("bad input for argument 'k'")
+ stop("bad input for argument 'k'")
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.list(elocation)) elocation = list()
new("vglmff",
- blurb = c("Huber least favorable distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), "\n\n",
- "Mean: location"),
- initialize = eval(substitute(expression({
- predictors.names <-
- c(namesof("location", .llocat, earg = .elocat, tag = FALSE))
-
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
-
- if (!length(etastart)) {
- junk = lm.wfit(x = x, y = y, w = w)
- location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
- if ( .imethod == 3) {
- rep(weighted.mean(y, w), len = n)
- } else if ( .imethod == 2) {
- rep(median(rep(y, w)), len = n)
- } else if ( .imethod == 1) {
- junk$fitted
- } else {
- y
- }
- }
- etastart <- cbind(
- theta2eta(location.init, .llocat, earg = .elocat))
+ blurb = c("Huber least favorable distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), "\n\n",
+ "Mean: location"),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ predictors.names <-
+ c(namesof("location", .llocat, earg = .elocat, tag = FALSE))
+
+
+ if (!length(etastart)) {
+ junk = lm.wfit(x = x, y = y, w = c(w))
+ location.init <- if ( .llocat == "loge") pmax(1/1024, y) else {
+ if ( .imethod == 3) {
+ rep(weighted.mean(y, w), len = n)
+ } else if ( .imethod == 2) {
+ rep(median(rep(y, w)), len = n)
+ } else if ( .imethod == 1) {
+ junk$fitted
+ } else {
+ y
+ }
}
- }), list( .llocat = llocation,
- .elocat = elocation,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .llocat, earg = .elocat)
- }, list( .llocat = llocation,
- .elocat = elocation ))),
- last = eval(substitute(expression({
- misc$link <- c("location" = .llocat )
- misc$earg <- list("location" = .elocat )
- misc$expected <- TRUE
- misc$k.huber <- .k
- misc$imethod <- .imethod
- }), list( .llocat = llocation,
- .elocat = elocation,
- .k = k, .imethod = imethod ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location <- eta2theta(eta, .llocat, earg = .elocat)
- kay <- .k
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dhuber(y, k = kay, mu = location, sigma = 1,
- log = TRUE))
- }
- }, list( .llocat = llocation,
- .elocat = elocation,
- .k = k ))),
- vfamily = c("huber1"),
- deriv = eval(substitute(expression({
- mylocat <- eta2theta(eta, .llocat, earg = .elocat)
- myk <- .k
-
- zedd <- (y - mylocat) # / myscale
- cond2 <- (abs(zedd) <= myk)
- cond3 <- (zedd > myk)
-
- dl.dlocat <- -myk + 0 * zedd # cond1
- dl.dlocat[cond2] <- zedd[cond2]
- dl.dlocat[cond3] <- myk # myk is a scalar
- dl.dlocat <- dl.dlocat # / myscale
+ etastart <- cbind(
+ theta2eta(location.init, .llocat, earg = .elocat))
+ }
+ }), list( .llocat = llocat,
+ .elocat = elocat,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .llocat, earg = .elocat)
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat )
+
+ misc$earg <- list("location" = .elocat )
+
+ misc$expected <- TRUE
+ misc$k.huber <- .k
+ misc$imethod <- .imethod
+ misc$multipleResponses <- FALSE
+ }), list( .llocat = llocat,
+ .elocat = elocat,
+ .k = k, .imethod = imethod ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ location <- eta2theta(eta, .llocat, earg = .elocat)
+ kay <- .k
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dhuber(y, k = kay, mu = location, sigma = 1,
+ log = TRUE))
+ }
+ }, list( .llocat = llocat,
+ .elocat = elocat,
+ .k = k ))),
+ vfamily = c("huber1"),
+ deriv = eval(substitute(expression({
+ mylocat <- eta2theta(eta, .llocat, earg = .elocat)
+ myk <- .k
+
+ zedd <- (y - mylocat) # / myscale
+ cond2 <- (abs(zedd) <= myk)
+ cond3 <- (zedd > myk)
+
+ dl.dlocat <- -myk + 0 * zedd # cond1
+ dl.dlocat[cond2] <- zedd[cond2]
+ dl.dlocat[cond3] <- myk # myk is a scalar
+ dl.dlocat <- dl.dlocat # / myscale
if (FALSE) {
@@ -365,27 +392,26 @@ phuber <- function(q, k = 0.862, mu = 0, sigma = 1)
dl.dscale <- (-1 + dl.dscale) / myscale
}
- dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
- ans <-
- c(w) * cbind(dl.dlocat * dlocat.deta)
- ans
- }), list( .llocat = llocation,
- .elocat = elocation,
- .eps = eps, .k = k ))),
- weight = eval(substitute(expression({
- wz <- matrix(as.numeric(NA), n, 1) # diag matrix; y is one-col too
+ dlocat.deta <- dtheta.deta(mylocat, .llocat, earg = .elocat)
+ ans <- c(w) * cbind(dl.dlocat * dlocat.deta)
+ ans
+ }), list( .llocat = llocat,
+ .elocat = elocat,
+ .eps = eps, .k = k ))),
+ weight = eval(substitute(expression({
+ wz <- matrix(as.numeric(NA), n, 1) # diag matrix; y is one-col too
- temp4 <- erf(myk / sqrt(2))
- ed2l.dlocat2 <- temp4 * (1 - .eps) # / myscale^2
+ temp4 <- erf(myk / sqrt(2))
+ ned2l.dlocat2 <- temp4 * (1 - .eps) # / myscale^2
- wz[, iam(1,1,M)] <- ed2l.dlocat2 * dlocat.deta^2
- ans
- c(w) * wz
- }), list( .eps = eps ))))
+ wz[, iam(1,1,M)] <- ned2l.dlocat2 * dlocat.deta^2
+ ans
+ c(w) * wz
+ }), list( .eps = eps ))))
}
diff --git a/R/family.rrr.R b/R/family.rrr.R
index 25141b9..e7dd0cc 100644
--- a/R/family.rrr.R
+++ b/R/family.rrr.R
@@ -546,20 +546,24 @@ rrr.normalize = function(rrcontrol, A, C, x, Dmat = NULL) {
}
+
+
+
rrr.end.expression = expression({
- if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
- rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
+ if (exists(".VGAM.etamat", envir = VGAM:::VGAMenv))
+ rm(".VGAM.etamat", envir = VGAM:::VGAMenv)
- if (control$Quadratic) {
- if (!length(extra)) extra=list()
- extra$Cmat = Cmat # Saves the latest iteration
- extra$Dmat = Dmat # Not the latest iteration
- extra$B1 = B1.save # Not the latest iteration (not good)
- } else {
- Blist = replace.constraints(Blist.save, Amat, colx2.index)
- }
+ if (control$Quadratic) {
+ if (!length(extra))
+ extra = list()
+ extra$Cmat = Cmat # Saves the latest iteration
+ extra$Dmat = Dmat # Not the latest iteration
+ extra$B1 = B1.save # Not the latest iteration (not good)
+ } else {
+ Blist = replace.constraints(Blist.save, Amat, colx2.index)
+ }
X_vlm_save = if (control$Quadratic) {
tmp300 = lm2qrrvlm.model.matrix(x=x, Blist = Blist.save,
diff --git a/R/family.survival.R b/R/family.survival.R
index 391951b..5ea3d7a 100644
--- a/R/family.survival.R
+++ b/R/family.survival.R
@@ -10,12 +10,10 @@
- dcennormal1 = function(r1 = 0, r2 = 0,
- lmu = "identity",
- lsd = "loge",
- emu = list(),
- esd = list(),
- imu = NULL, isd = NULL, zero = 2)
+ dcennormal1 <- function(r1 = 0, r2 = 0,
+ lmu = "identity",
+ lsd = "loge",
+ imu = NULL, isd = NULL, zero = 2)
{
if (!is.Numeric(r1, allowable.length = 1, integer.valued = TRUE) ||
r1 < 0)
@@ -23,12 +21,15 @@
if (!is.Numeric(r2, allowable.length = 1, integer.valued = TRUE) ||
r2 < 0)
stop("bad input for 'r2'")
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsd) != "character" && mode(lsd) != "name")
- lsd = as.character(substitute(lsd))
- if (!is.list(emu)) emu = list()
- if (!is.list(esd)) esd = list()
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lsd <- as.list(substitute(lsd))
+ esd <- link2list(lsd)
+ lsd <- attr(esd, "function.name")
+
new("vglmff",
blurb = c("Univariate Normal distribution with double censoring\n\n",
@@ -148,16 +149,17 @@
-dbisa = function(x, shape, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dbisa <- function(x, shape, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
L = max(length(x), length(shape), length(scale))
x = rep(x, len=L); shape = rep(shape, len=L); scale = rep(scale, len=L);
logdensity = rep(log(0), len=L)
xok = (x > 0)
- xifun = function(x) {temp <- sqrt(x); temp - 1/temp}
+ xifun <- function(x) {temp <- sqrt(x); temp - 1/temp}
logdensity[xok] = dnorm(xifun(x[xok]/scale[xok]) / shape[xok], log = TRUE) +
log1p(scale[xok]/x[xok]) - log(2) - log(shape[xok]) -
0.5 * log(x[xok]) - 0.5 * log(scale[xok])
@@ -167,7 +169,7 @@ dbisa = function(x, shape, scale = 1, log = FALSE) {
}
-pbisa = function(q, shape, scale=1) {
+pbisa <- function(q, shape, scale=1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(shape, positive = TRUE))
@@ -181,7 +183,7 @@ pbisa = function(q, shape, scale=1) {
}
-qbisa = function(p, shape, scale=1) {
+qbisa <- function(p, shape, scale=1) {
if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("argument 'p' must have values inside the interval (0,1)")
if (!is.Numeric(shape, positive = TRUE))
@@ -196,7 +198,7 @@ qbisa = function(p, shape, scale=1) {
}
-rbisa = function(n, shape, scale=1) {
+rbisa <- function(n, shape, scale=1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -222,27 +224,30 @@ rbisa = function(n, shape, scale=1) {
- bisa = function(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale = 1,
- imethod = 1, zero = NULL)
+ bisa <- function(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = 1,
+ imethod = 1, zero = NULL)
{
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
-
- if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
- stop("bad input for argument 'ishape'")
- if (!is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
-
- if (!is.list(eshape)) eshape = list()
- if (!is.list(escale)) escale = list()
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+ stop("bad input for argument 'ishape'")
+ if (!is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+
new("vglmff",
blurb = c("Birnbaum-Saunders distribution\n\n",
@@ -317,7 +322,7 @@ rbisa = function(n, shape, scale=1) {
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, M) # Diagonal!!
wz[,iam(1,1,M)] = 2 * dsh.deta^2 / sh^2
- hfunction = function(alpha)
+ hfunction <- function(alpha)
alpha * sqrt(pi/2) - pi * exp(2/alpha^2) *
pnorm(2/alpha, lower.tail = FALSE)
wz[,iam(2,2,M)] = dsc.deta^2 * (sh * hfunction(sh) / sqrt(2*pi) +
diff --git a/R/family.ts.R b/R/family.ts.R
index e06c4a5..43cd1d4 100644
--- a/R/family.ts.R
+++ b/R/family.ts.R
@@ -126,14 +126,14 @@
-rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...)
-{
+rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...) {
if (stepsize <= 0 || stepsize > 1) {
warning("bad value of stepsize; using 0.5 instead")
stepsize <- 0.5
}
- list(stepsize=stepsize, save.weight = as.logical(save.weight)[1])
+ list(stepsize = stepsize,
+ save.weight = as.logical(save.weight)[1])
}
@@ -142,8 +142,8 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...)
lag.p <- length(Ranks)
new("vglmff",
- blurb = c("Nested reduced-rank vector autoregressive model AR(", lag.p,
- ")\n\n",
+ blurb = c("Nested reduced-rank vector autoregressive model AR(",
+ lag.p, ")\n\n",
"Link: ",
namesof("mu_t", "identity"),
", t = ", paste(paste(1:lag.p, coll = ",", sep = "")) ,
@@ -152,7 +152,6 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...)
Ranks. <- .Ranks
plag <- length(Ranks.)
nn <- nrow(x) # original n
- pp <- ncol(x)
indices <- 1:plag
copy_X_vlm <- TRUE # X_vlm_save matrix changes at each iteration
@@ -282,22 +281,16 @@ rrar.control <- function(stepsize = 0.5, save.weight = TRUE, ...)
-
-vglm.garma.control <- function(save.weight = TRUE, ...)
-{
+vglm.garma.control <- function(save.weight = TRUE, ...) {
list(save.weight = as.logical(save.weight)[1])
}
garma <- function(link = "identity",
- earg = list(),
- p.ar.lag = 1,
- q.ma.lag = 0,
- coefstart = NULL,
- step = 1.0)
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
+ p.ar.lag = 1,
+ q.ma.lag = 0,
+ coefstart = NULL,
+ step = 1.0) {
if (!is.Numeric(p.ar.lag, integer.valued = TRUE, allowable.length = 1))
stop("bad input for argument 'p.ar.lag'")
@@ -306,7 +299,10 @@ vglm.garma.control <- function(save.weight = TRUE, ...)
if (q.ma.lag != 0)
stop("sorry, only q.ma.lag = 0 is currently implemented")
- if (!is.list(earg)) earg = list()
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
new("vglmff",
@@ -316,32 +312,34 @@ vglm.garma.control <- function(save.weight = TRUE, ...)
", t = ", paste(paste(1:p.ar.lag, coll = ",", sep = ""))),
initialize = eval(substitute(expression({
plag <- .p.ar.lag
- predictors.names = namesof("mu", .link, earg = .earg, tag = FALSE)
+ predictors.names = namesof("mu", .link , earg = .earg , tag = FALSE)
indices <- 1:plag
- tt <- (1+plag):nrow(x)
- pp <- ncol(x)
+ tt.index <- (1 + plag):nrow(x)
+ p_lm <- ncol(x)
copy_X_vlm <- TRUE # x matrix changes at each iteration
- if ( .link == "logit" || .link == "probit" || .link == "cloglog" ||
- .link == "cauchit") {
+ if ( .link == "logit" || .link == "probit" ||
+ .link == "cloglog" || .link == "cauchit") {
delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
- mustart <- mustart[tt,2]
- y <- y[,2]
+ mustart <- mustart[tt.index, 2]
+ y <- y[, 2]
+ } else {
}
- x.save <- x # Save the original
- y.save <- y # Save the original
- w.save <- w # Save the original
+
+ x.save <- x # Save the original
+ y.save <- y # Save the original
+ w.save <- w # Save the original
new.coeffs <- .coefstart # Needed for iter = 1 of @weight
new.coeffs <- if (length(new.coeffs))
- rep(new.coeffs, len = pp+plag) else
- c(runif(pp), rep(0, plag))
+ rep(new.coeffs, len = p_lm + plag) else
+ c(rnorm(p_lm, sd = 0.1), rep(0, plag))
if (!length(etastart)) {
- etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:pp]
+ etastart <- x[-indices, , drop = FALSE] %*% new.coeffs[1:p_lm]
}
x <- cbind(x, matrix(as.numeric(NA), n, plag)) # Right size now
@@ -354,82 +352,89 @@ vglm.garma.control <- function(save.weight = TRUE, ...)
y <- y[-indices]
w <- w[-indices]
n.save <- n <- n - plag
+
more <- vector("list", plag)
names(more) <- morenames
- for(i in 1:plag)
- more[[i]] <- i + max(unlist(attr(x.save, "assign")))
+ for(ii in 1:plag)
+ more[[ii]] <- ii + max(unlist(attr(x.save, "assign")))
attr(x, "assign") <- c(attr(x.save, "assign"), more)
}), list( .link = link, .p.ar.lag = p.ar.lag,
.coefstart = coefstart, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, link = .link, earg = .earg)
+ eta2theta(eta, link = .link , earg = .earg)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link <- c(mu = .link)
- misc$earg <- list(mu = .earg)
+ misc$link <- c(mu = .link )
+ misc$earg <- list(mu = .earg )
misc$plag <- plag
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
if (residuals) switch( .link ,
- identity = y-mu,
- loge = w*(y/mu - 1),
- inverse = w*(y/mu - 1),
- w*(y/mu - (1-y)/(1-mu))) else
+ identity = y - mu,
+ loge = w * (y / mu - 1),
+ reciprocal = w * (y / mu - 1),
+ inverse = w * (y / mu - 1),
+ w * (y / mu - (1-y) / (1 - mu))) else
switch( .link ,
- identity = sum(w*(y-mu)^2),
- loge = sum(w*(-mu + y*log(mu))),
- inverse = sum(w*(-mu + y*log(mu))),
- sum(w*(y * log(mu) + (1-y) * log1p(-mu))))
+ identity = sum(w * (y - mu)^2),
+ loge = sum(w * (-mu + y * log(mu))),
+ reciprocal = sum(w * (-mu + y * log(mu))),
+ inverse = sum(w * (-mu + y * log(mu))),
+ sum(w * (y * log(mu) + (1-y) * log1p(-mu))))
}, list( .link = link, .earg = earg ))),
middle2 = eval(substitute(expression({
realfv <- fv
- for(i in 1:plag) {
- realfv <- realfv + old.coeffs[i+pp] *
- (x.save[tt-i, 1:pp,drop = FALSE] %*% new.coeffs[1:pp]) # +
+ for(ii in 1:plag) {
+ realfv <- realfv + old.coeffs[ii + p_lm] *
+ (x.save[tt.index-ii, 1:p_lm, drop = FALSE] %*%
+ new.coeffs[1:p_lm]) # +
}
true.eta <- realfv + offset
- mu <- family at linkinv(true.eta, extra) # overwrite mu with correct one
+ mu <- family at linkinv(true.eta, extra) # overwrite mu with correct one
}), list( .link = link, .earg = earg ))),
vfamily = c("garma", "vglmgam"),
deriv = eval(substitute(expression({
- dl.dmu <- switch( .link,
+ dl.dmu <- switch( .link ,
identity = y-mu,
- loge = (y - mu) / mu,
- inverse = (y - mu) / mu,
+ loge = (y - mu) / mu,
+ reciprocal = (y - mu) / mu,
+ inverse = (y - mu) / mu,
(y - mu) / (mu * (1 - mu)))
- dmu.deta <- dtheta.deta(mu, .link, earg = .earg)
- step <- .step # This is another method of adjusting step lengths
- step * w * dl.dmu * dmu.deta
+ dmu.deta <- dtheta.deta(mu, .link , earg = .earg)
+ Step <- .step # This is another method of adjusting step lengths
+ Step * c(w) * dl.dmu * dmu.deta
}), list( .link = link,
.step = step,
.earg = earg ))),
weight = eval(substitute(expression({
- x[, 1:pp] <- x.save[tt, 1:pp] # Reinstate
+ x[, 1:p_lm] <- x.save[tt.index, 1:p_lm] # Reinstate
- for(i in 1:plag) {
- temp = theta2eta(y.save[tt-i], .link, earg = .earg)
+ for(ii in 1:plag) {
+ temp = theta2eta(y.save[tt.index-ii], .link , earg = .earg )
- x[, 1:pp] <- x[, 1:pp] - x.save[tt-i, 1:pp] * new.coeffs[i+pp]
- x[, pp+i] <- temp - x.save[tt-i, 1:pp,drop = FALSE] %*%
- new.coeffs[1:pp]
+ x[, 1:p_lm] <- x[, 1:p_lm] -
+ x.save[tt.index-ii, 1:p_lm] * new.coeffs[ii + p_lm]
+ x[, p_lm+ii] <- temp - x.save[tt.index-ii, 1:p_lm, drop = FALSE] %*%
+ new.coeffs[1:p_lm]
}
class(x) = "matrix" # Added 27/2/02; 26/2/04
if (iter == 1)
- old.coeffs <- new.coeffs
+ old.coeffs <- new.coeffs
X_vlm_save <- lm2vlm.model.matrix(x, Blist, xij = control$xij)
vary = switch( .link ,
identity = 1,
- loge = mu,
- inverse = mu^2,
+ loge = mu,
+ reciprocal = mu^2,
+ inverse = mu^2,
mu * (1 - mu))
- w * dtheta.deta(mu, link = .link , earg = .earg )^2 / vary
+ c(w) * dtheta.deta(mu, link = .link , earg = .earg )^2 / vary
}), list( .link = link,
.earg = earg ))))
}
diff --git a/R/family.univariate.R b/R/family.univariate.R
index 50e6085..db1217e 100644
--- a/R/family.univariate.R
+++ b/R/family.univariate.R
@@ -22,126 +22,156 @@
-getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
- abs.arg = FALSE) {
- if (!is.vector(vov)) stop("'vov' must be a vector")
- objvals = vov
- for(ii in 1:length(vov))
- objvals[ii] = objfun(vov[ii], y = y, x = x, w = w, extraargs=extraargs)
- try.this = if (abs.arg) {
- if (maximize) vov[abs(objvals) == max(abs(objvals))] else
- vov[abs(objvals) == min(abs(objvals))]
- } else {
- if (maximize) vov[objvals == max(objvals)] else
- vov[objvals == min(objvals)]
- }
- if (!length(try.this)) stop("something has gone wrong!")
- if (length(try.this) == 1) try.this else sample(try.this, size=1)
+
+
+
+
+
+
+ getMaxMin <- function(vov, objfun, y, x, w, extraargs = NULL,
+ maximize = TRUE, abs.arg = FALSE,
+ ret.objfun = FALSE) {
+ if (!is.vector(vov))
+ stop("'vov' must be a vector")
+ objvals <- vov
+ for(ii in 1:length(vov))
+ objvals[ii] <- objfun(vov[ii], y = y, x = x, w = w,
+ extraargs = extraargs)
+ try.this <- if (abs.arg) {
+ if (maximize) vov[abs(objvals) == max(abs(objvals))] else
+ vov[abs(objvals) == min(abs(objvals))]
+ } else {
+ if (maximize) vov[objvals == max(objvals)] else
+ vov[objvals == min(objvals)]
+ }
+ if (!length(try.this))
+ stop("something has gone wrong!")
+ ans <- if (length(try.this) == 1)
+ try.this else sample(try.this, size = 1)
+ if (ret.objfun) c(ans, objvals[ans == vov]) else ans
}
- mccullagh89 = function(ltheta = "rhobit", lnu = "logoff",
- itheta = NULL, inu = NULL,
- etheta = list(),
- enu = if (lnu == "logoff") list(offset = 0.5) else list(),
- zero = NULL)
+
+ mccullagh89 <- function(ltheta = "rhobit",
+ lnu = logoff(offset = 0.5),
+ itheta = NULL, inu = NULL,
+ zero = NULL)
{
- if (mode(ltheta) != "character" && mode(ltheta) != "name")
- ltheta = as.character(substitute(ltheta))
- if (mode(lnu) != "character" && mode(lnu) != "name")
- lnu = as.character(substitute(lnu))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(etheta)) etheta = list()
- if (!is.list(enu)) enu = list()
- new("vglmff",
- blurb = c("McCullagh (1989)'s distribution \n",
- "f(y) = (1-2*theta*y+theta^2)^(-nu) * [1 - y^2]^(nu-1/2) /\n",
- " Beta[nu+1/2, 1/2], ",
- " -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
- "Links: ",
- namesof("theta", ltheta, earg = etheta), ", ",
- namesof("nu", lnu, earg = enu),
- "\n",
- "\n",
- "Mean: nu*theta/(1+nu)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (any(y <= -1 | y >= 1))
- stop("all y values must be in (-1,1)")
+ ltheta <- as.list(substitute(ltheta))
+ etheta <- link2list(ltheta)
+ ltheta <- attr(etheta, "function.name")
- predictors.names =
- c(namesof("theta", .ltheta, earg = .etheta, tag = FALSE),
- namesof("nu", .lnu, earg = .enu, tag = FALSE))
- if (!length(etastart)) {
- theta.init = if (length( .itheta))
- rep( .itheta, length = n) else {
- mccullagh89.aux = function(thetaval, y, x, w, extraargs)
- mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
- theta.grid = seq(-0.9, 0.9, by=0.05)
- try.this = getMaxMin(theta.grid, objfun = mccullagh89.aux,
- y = y, x = x, w = w, maximize = FALSE,
- abs.arg = TRUE)
- try.this = rep(try.this, length.out = n)
- try.this
- }
- tmp = y / (theta.init-y)
- tmp[tmp < -0.4] = -0.4
- tmp[tmp > 10.0] = 10.0
- nu.init = rep(if (length( .inu)) .inu else tmp, length = n)
- nu.init[!is.finite(nu.init)] = 0.4
- etastart = cbind(theta2eta(theta.init, .ltheta, earg = .etheta ),
- theta2eta(nu.init, .lnu, earg = .enu ))
- }
- }), list( .ltheta = ltheta, .lnu=lnu, .inu=inu, .itheta = itheta,
- .etheta = etheta, .enu=enu ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
- nu*Theta/(1+nu)
- }, list( .ltheta = ltheta, .lnu=lnu,
- .etheta = etheta, .enu=enu ))),
- last = eval(substitute(expression({
- misc$link = c("theta" = .ltheta, "nu" = .lnu)
- misc$earg = list("theta" = .etheta, "nu" = .enu )
- }), list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * ((nu-0.5)*log1p(-y^2) - nu * log1p(-2*Theta*y + Theta^2) -
- lbeta(nu+0.5,0.5 )))
- }, list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- vfamily = c("mccullagh89"),
- deriv = eval(substitute(expression({
- Theta = eta2theta(eta[, 1], .ltheta, earg = .etheta )
- nu = eta2theta(eta[, 2], .lnu, earg = .enu )
- dTheta.deta = dtheta.deta(Theta, .ltheta, earg = .etheta )
- dnu.deta = dtheta.deta(nu, .lnu, earg = .enu )
- dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
- dl.dnu = log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
- digamma(nu+0.5) + digamma(nu+1)
- c(w) * cbind(dl.dTheta * dTheta.deta,
- dl.dnu * dnu.deta)
- }), list( .ltheta = ltheta, .lnu=lnu, .etheta = etheta, .enu=enu ))),
- weight = eval(substitute(expression({
- d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
- d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
- wz = matrix(as.numeric(NA), n, M) #diagonal matrix
- wz[,iam(1,1,M)] = d2l.dTheta2 * dTheta.deta^2
- wz[,iam(2,2,M)] = d2l.dnu2 * dnu.deta^2
- c(w) * wz
- }), list( .ltheta = ltheta, .lnu=lnu ))))
+ lnu <- as.list(substitute(lnu))
+ enu <- link2list(lnu)
+ lnu <- attr(enu, "function.name")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("McCullagh (1989)'s distribution \n",
+ "f(y) = (1-2*theta*y+theta^2)^(-nu) * [1 - y^2]^(nu-1/2) /\n",
+ " Beta[nu+1/2, 1/2], ",
+ " -1 < y < 1, -1 < theta < 1, nu > -1/2\n",
+ "Links: ",
+ namesof("theta", ltheta, earg = etheta), ", ",
+ namesof("nu", lnu, earg = enu),
+ "\n",
+ "\n",
+ "Mean: nu*theta/(1+nu)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ w.y.check(w, y)
+
+ y = as.numeric(y)
+ if (any(y <= -1 | y >= 1))
+ stop("all y values must be in (-1, 1)")
+
+ predictors.names <-
+ c(namesof("theta", .ltheta , earg = .etheta, tag = FALSE),
+ namesof("nu", .lnu , earg = .enu, tag = FALSE))
+
+ if (!length(etastart)) {
+ theta.init = if (length( .itheta ))
+ rep( .itheta, length = n) else {
+ mccullagh89.aux <- function(thetaval, y, x, w, extraargs)
+ mean((y-thetaval)*(thetaval^2-1)/(1-2*thetaval*y+thetaval^2))
+ theta.grid = seq(-0.9, 0.9, by=0.05)
+ try.this = getMaxMin(theta.grid, objfun = mccullagh89.aux,
+ y = y, x = x, w = w, maximize = FALSE,
+ abs.arg = TRUE)
+ try.this = rep(try.this, length.out = n)
+ try.this
+ }
+ tmp = y / (theta.init - y)
+ tmp[tmp < -0.4] = -0.4
+ tmp[tmp > 10.0] = 10.0
+ nu.init = rep(if (length( .inu)) .inu else tmp, length = n)
+ nu.init[!is.finite(nu.init)] = 0.4
+ etastart <-
+ cbind(theta2eta(theta.init, .ltheta , earg = .etheta ),
+ theta2eta(nu.init, .lnu, earg = .enu ))
+ }
+ }), list( .ltheta = ltheta, .lnu = lnu, .inu = inu, .itheta = itheta,
+ .etheta = etheta, .enu = enu ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+ nu * Theta / (1 + nu)
+ }, list( .ltheta = ltheta, .lnu = lnu,
+ .etheta = etheta, .enu = enu ))),
+ last = eval(substitute(expression({
+ misc$link = c("theta" = .ltheta , "nu" = .lnu)
+ misc$earg = list("theta" = .etheta, "nu" = .enu )
+ }), list( .ltheta = ltheta, .lnu = lnu, .etheta = etheta, .enu = enu ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * ((nu-0.5)*log1p(-y^2) -
+ nu * log1p(-2*Theta*y + Theta^2) -
+ lbeta(nu + 0.5, 0.5)))
+ }, list( .ltheta = ltheta, .lnu = lnu,
+ .etheta = etheta, .enu = enu ))),
+ vfamily = c("mccullagh89"),
+ deriv = eval(substitute(expression({
+ Theta = eta2theta(eta[, 1], .ltheta , earg = .etheta )
+ nu = eta2theta(eta[, 2], .lnu, earg = .enu )
+
+ dTheta.deta = dtheta.deta(Theta, .ltheta , earg = .etheta )
+ dnu.deta = dtheta.deta(nu, .lnu, earg = .enu )
+
+ dl.dTheta = 2 * nu * (y-Theta) / (1 -2*Theta*y + Theta^2)
+ dl.dnu = log1p(-y^2) - log1p(-2*Theta*y + Theta^2) -
+ digamma(nu + 0.5) + digamma(nu + 1)
+
+ c(w) * cbind(dl.dTheta * dTheta.deta,
+ dl.dnu * dnu.deta)
+ }), list( .ltheta = ltheta, .lnu = lnu,
+ .etheta = etheta, .enu = enu ))),
+ weight = eval(substitute(expression({
+ d2l.dTheta2 = (2 * nu^2 / (1+nu)) / (1-Theta^2)
+ d2l.dnu2 = trigamma(nu+0.5) - trigamma(nu+1)
+
+ wz = matrix(as.numeric(NA), n, M) # diagonal matrix
+ wz[, iam(1, 1, M)] = d2l.dTheta2 * dTheta.deta^2
+ wz[, iam(2, 2, M)] = d2l.dnu2 * dnu.deta^2
+
+ c(w) * wz
+ }), list( .ltheta = ltheta, .lnu = lnu ))))
}
@@ -149,106 +179,112 @@ getMaxMin = function(vov, objfun, y, x, w, extraargs = NULL, maximize = TRUE,
hzeta.control <- function(save.weight = TRUE, ...)
{
- list(save.weight = save.weight)
+ list(save.weight = save.weight)
}
- hzeta = function(link = "loglog", earg = list(),
- ialpha = NULL, nsimEIM = 100)
-{
+ hzeta <- function(link = "loglog", ialpha = NULL, nsimEIM = 100) {
- stopifnot(ialpha > 0)
- stopifnot(nsimEIM > 10, length(nsimEIM) == 1, nsimEIM == round(nsimEIM))
+ stopifnot(ialpha > 0)
+ stopifnot(nsimEIM > 10,
+ length(nsimEIM) == 1,
+ nsimEIM == round(nsimEIM))
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c(
- "Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),\n",
- " alpha>0, y = 1,2,....\n\n",
- "Link: ",
- namesof("alpha", link, earg = earg), "\n\n",
- "Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
- "\n",
- "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
- initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y < 1))
- stop("all y values must be in 1,2,3,....")
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- predictors.names = namesof("alpha", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- a.init = if (length( .ialpha)) .ialpha else {
- if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
- if (meany < 2.5) 1.4 else 1.1
- }
- a.init = rep(a.init, length = n)
- etastart = theta2eta(a.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .ialpha=ialpha ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- alpha = eta2theta(eta, .link, earg = .earg )
- mu = (1-2^(-alpha)) * zeta(alpha)
- mu[alpha <= 1] = Inf
- mu
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(alpha = .link)
- misc$earg = list(alpha = .earg )
- misc$nsimEIM = .nsimEIM
- }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- alpha = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dhzeta(x = y, alpha=alpha, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("hzeta"),
- deriv = eval(substitute(expression({
- alpha = eta2theta(eta, .link, earg = .earg )
- dalpha.deta = dtheta.deta(alpha, .link, earg = .earg )
- d3 = deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
- "alpha", hessian = FALSE)
- eval.d3 = eval(d3)
- dl.dalpha = attr(eval.d3, "gradient")
- c(w) * dl.dalpha * dalpha.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- sd3 = deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
- "alpha", hessian = FALSE)
- run.var = 0
- for(ii in 1:( .nsimEIM )) {
- ysim = rhzeta(n, alpha=alpha)
- eval.sd3 = eval(sd3)
- dl.dalpha = attr(eval.d3, "gradient")
- rm(ysim)
- temp3 = dl.dalpha
- run.var = ((ii-1) * run.var + temp3^2) / ii
- }
- wz = if (intercept.only)
- matrix(colMeans(cbind(run.var)),
- n, dimm(M), byrow = TRUE) else cbind(run.var)
+ new("vglmff",
+ blurb = c(
+ "Haight's Zeta distribution f(y) = (2y-1)^(-alpha) - (2y+1)^(-alpha),\n",
+ " alpha>0, y = 1, 2,....\n\n",
+ "Link: ",
+ namesof("alpha", link, earg = earg), "\n\n",
+ "Mean: (1-2^(-alpha)) * zeta(alpha) if alpha>1",
+ "\n",
+ "Variance: (1-2^(1-alpha)) * zeta(alpha-1) - mean^2 if alpha>2"),
+ initialize = eval(substitute(expression({
- wz = wz * dalpha.deta^2
- c(w) * wz
- }), list( .nsimEIM = nsimEIM ))))
+ w.y.check(w = w, y = y)
+
+ if (any(y < 1))
+ stop("all y values must be in 1, 2, 3,....")
+
+ predictors.names <-
+ namesof("alpha", .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ a.init = if (length( .ialpha)) .ialpha else {
+ if ((meany <- weighted.mean(y, w)) < 1.5) 3.0 else
+ if (meany < 2.5) 1.4 else 1.1
+ }
+ a.init = rep(a.init, length = n)
+ etastart <- theta2eta(a.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .ialpha = ialpha ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ alpha = eta2theta(eta, .link , earg = .earg )
+ mu = (1-2^(-alpha)) * zeta(alpha)
+ mu[alpha <= 1] = Inf
+ mu
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(alpha = .link)
+ misc$earg = list(alpha = .earg )
+ misc$nsimEIM = .nsimEIM
+ }), list( .link = link, .earg = earg, .nsimEIM = nsimEIM ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ alpha = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dhzeta(x = y, alpha = alpha, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("hzeta"),
+ deriv = eval(substitute(expression({
+ alpha = eta2theta(eta, .link , earg = .earg )
+
+ dalpha.deta = dtheta.deta(alpha, .link , earg = .earg )
+
+ d3 = deriv3(~ log((2*y-1)^(-alpha) - (2*y+1)^(-alpha)),
+ "alpha", hessian = FALSE)
+ eval.d3 = eval(d3)
+
+ dl.dalpha = attr(eval.d3, "gradient")
+
+ c(w) * dl.dalpha * dalpha.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ sd3 = deriv3(~ log((2*ysim-1)^(-alpha) - (2*ysim+1)^(-alpha)),
+ "alpha", hessian = FALSE)
+ run.var = 0
+ for(ii in 1:( .nsimEIM )) {
+ ysim = rhzeta(n, alpha=alpha)
+ eval.sd3 = eval(sd3)
+ dl.dalpha = attr(eval.d3, "gradient")
+ rm(ysim)
+ temp3 = dl.dalpha
+ run.var = ((ii-1) * run.var + temp3^2) / ii
+ }
+ wz = if (intercept.only)
+ matrix(colMeans(cbind(run.var)),
+ n, dimm(M), byrow = TRUE) else cbind(run.var)
+
+ wz = wz * dalpha.deta^2
+ c(w) * wz
+ }), list( .nsimEIM = nsimEIM ))))
}
-dhzeta = function(x, alpha, log = FALSE)
-{
- if (!is.logical(log.arg <- log))
+dhzeta <- function(x, alpha, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -266,8 +302,7 @@ dhzeta = function(x, alpha, log = FALSE)
}
-phzeta = function(q, alpha)
-{
+phzeta <- function(q, alpha) {
nn = max(length(q), length(alpha))
@@ -285,23 +320,22 @@ phzeta = function(q, alpha)
}
-qhzeta = function(p, alpha)
-{
+qhzeta <- function(p, alpha) {
- if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
- stop("argument 'p' must have values inside the interval (0,1)")
+ if (!is.Numeric(p, positive = TRUE) ||
+ any(p >= 1))
+ stop("argument 'p' must have values inside the interval (0,1)")
nn = max(length(p), length(alpha))
p = rep(p, length.out = nn)
alpha = rep(alpha, length.out = nn)
ans = (((1 - p)^(-1/alpha) - 1) / 2) # p is in (0,1)
ans[alpha <= 0] = NaN
- floor(ans+1)
+ floor(ans + 1)
}
-rhzeta = function(n, alpha)
-{
+rhzeta <- function(n, alpha) {
ans = (runif(n)^(-1/alpha) - 1) / 2
@@ -314,21 +348,28 @@ rhzeta = function(n, alpha)
- dirmultinomial <- function(lphi = "logit", ephi = list(),
- iphi = 0.10, parallel = FALSE, zero = "M")
-{
+ dirmultinomial <- function(lphi = "logit",
+ iphi = 0.10, parallel = FALSE, zero = "M") {
+
+
+
+
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi <- as.character(substitute(lphi))
if (length(zero) &&
!(is.Numeric(zero, integer.valued = TRUE, positive = TRUE) ||
is.character(zero )))
stop("bad input for argument 'zero'")
- if (!is.Numeric(iphi, positive = TRUE) || max(iphi) >= 1.0)
+ if (!is.Numeric(iphi, positive = TRUE) ||
+ max(iphi) >= 1.0)
stop("bad input for argument 'iphi'")
- if (!is.list(ephi)) ephi <- list()
+
+
+
new("vglmff",
blurb = c("Dirichlet-multinomial distribution\n\n",
@@ -341,146 +382,180 @@ rhzeta = function(n, alpha)
if (is.character( .ZERO)) .ZERO <- eval(parse(text = .ZERO))
.PARALLEL <- .parallel
if (is.logical( .PARALLEL) && .PARALLEL) {
- mycmatrix <- if (length( .ZERO))
+ mycmatrix <- if (length( .ZERO ))
stop("can only handle parallel = TRUE when zero = NULL") else
- cbind(rbind(matrix(1, M - 1, 1), 0), rbind(matrix(0, M - 1, 1), 1))
- } else
+ cbind(rbind(matrix(1, M - 1, 1), 0),
+ rbind(matrix(0, M - 1, 1), 1))
+ } else {
mycmatrix <- if (M == 1) diag(1) else diag(M)
- constraints <- cm.vgam(mycmatrix, x, .PARALLEL,
- constraints, intercept.apply = TRUE)
- constraints <- cm.zero.vgam(constraints, x, .ZERO, M)
+ }
+ constraints <- cm.vgam(mycmatrix, x, .PARALLEL ,
+ constraints, intercept.apply = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .ZERO , M)
}), list( .parallel = parallel, .zero = zero ))),
initialize = eval(substitute(expression({
+ mustart.orig = mustart
+
delete.zero.colns <- TRUE
eval(process.categorical.data.vgam)
+ if (length(mustart.orig))
+ mustart = mustart.orig
+
y <- as.matrix(y)
ycount <- as.matrix(y * c(w))
M <- ncol(y)
- if (max(abs(ycount - round(ycount ))) > 1.0e-6)
+
+ if (max(abs(ycount - round(ycount))) > 1.0e-6)
warning("there appears to be non-integer responses")
+
if (min(ycount) < 0)
stop("all values of the response (matrix) must be non-negative")
+
predictors.names <-
c(paste("log(prob[,", 1:(M-1), "]/prob[,", M, "])", sep = ""),
- namesof("phi", .lphi, short = TRUE))
- extra$n2 <- w # aka omega, must be integer # as.vector(apply(y, 1, sum))
+ namesof("phi", .lphi , short = TRUE))
+
+ extra$n2 <- w # aka omega, must be integer # as.vector(apply(y, 1, sum))
+
if (!length(etastart)) {
- prob.init <- colSums(ycount)
- prob.init <- prob.init / sum(prob.init)
- prob.init <- matrix(prob.init, n, M, byrow = TRUE)
- phi.init <- rep( .iphi, length.out = n)
- etastart <- cbind(log(prob.init[,-M]/prob.init[,M]),
- theta2eta(phi.init, .lphi, earg = .ephi ))
- }
- }), list( .lphi = lphi, .ephi = ephi, .iphi=iphi ))),
+ if (length(mustart.orig)) {
+ prob.init <- mustart
+ } else {
+ prob.init <- colSums(ycount)
+ prob.init <- prob.init / sum(prob.init)
+ prob.init <- matrix(prob.init, n, M, byrow = TRUE)
+ }
+
+ phi.init <- rep( .iphi , length.out = n)
+ etastart <-
+ cbind(log(prob.init[, -M] / prob.init[, M]),
+ theta2eta(phi.init, .lphi , earg = .ephi ))
+ }
+
+ mustart <- NULL # Since etastart has been computed.
+
+ }), list( .lphi = lphi, .ephi = ephi, .iphi = iphi ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
M <- if (is.matrix(eta)) ncol(eta) else 1
- temp <- cbind(exp(eta[,-M]), 1)
- temp / as.vector(temp %*% rep(1, M))
+ temp <- cbind(exp(eta[, -M, drop = FALSE]), 1)
+ prop.table(temp, 1)
}, list( .ephi = ephi, .lphi = lphi ))),
last = eval(substitute(expression({
- misc$link <- c(rep("noLinkFunction", length = M-1), .lphi)
- names(misc$link) <- c(paste("prob", 1:(M-1), sep = ""), "phi")
- misc$earg <- vector("list", M)
- names(misc$earg) <- names(misc$link)
- for(ii in 1:(M-1)) misc$earg[[ii]] <- list()
- misc$earg[[M]] <- .ephi
- misc$expected <- TRUE
- if (intercept.only) {
- misc$shape<-probs[1,]*(1/phi[1]-1) # phi & probs computed in @deriv
- }
+ misc$link <- c(rep("noLinkFunction", length = M-1), .lphi)
+ names(misc$link) <- c(paste("prob", 1:(M-1), sep = ""), "phi")
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:(M-1))
+ misc$earg[[ii]] <- list()
+ misc$earg[[M]] <- .ephi
+
+ misc$expected <- TRUE
+
+ if (intercept.only) {
+ misc$shape = probs[1,] * (1/phi[1]-1) # phi & probs computed in @deriv
+ }
}), list( .ephi = ephi, .lphi = lphi ))),
loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- M = if (is.matrix(eta)) ncol(eta) else 1
- probs <- cbind(exp(eta[,-M]), 1)
- probs <- probs / as.vector(probs %*% rep(1, M))
- phi <- eta2theta(eta[,M], .lphi, earg = .ephi )
- n <- length(phi)
- ycount <- as.matrix(y * c(w))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- ans <- rep(0.0, length.out = n)
- omega <- extra$n2
- for(jay in 1:M) {
- maxyj <- max(ycount[,jay])
- loopOveri <- n < maxyj
- if (loopOveri) {
- for(iii in 1:n) {
- rrr <- 1:ycount[iii,jay] # a vector
- if (ycount[iii,jay] > 0)
- ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
- probs[iii,jay] + (rrr-1)*phi[iii]))
- }
- } else {
- for(rrr in 1:maxyj) {
- index <- (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
- if (any(index))
- ans[index] <- ans[index] + log((1-phi[index]) *
- probs[index,jay] + (rrr-1)*phi[index])
- }
- }
- } # end of jay loop
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M = if (is.matrix(eta)) ncol(eta) else 1
+ probs <- cbind(exp(eta[, -M]), 1)
+ probs <- prop.table(probs, 1)
+ phi <- eta2theta(eta[, M], .lphi , earg = .ephi )
+ n <- length(phi)
+ ycount <- as.matrix(y * c(w))
- maxomega <- max(omega)
- loopOveri <- n < maxomega
+ ycount <- round(ycount)
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ ans <- rep(0.0, length.out = n)
+ omega <- extra$n2
+ for(jay in 1:M) {
+ maxyj <- max(ycount[, jay])
+ loopOveri <- n < maxyj
if (loopOveri) {
for(iii in 1:n) {
- rrr <- 1:omega[iii]
- ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1)*phi[iii]))
+ rrr <- 1:ycount[iii, jay] # a vector
+ if (ycount[iii, jay] > 0)
+ ans[iii] <- ans[iii] + sum(log((1-phi[iii]) *
+ probs[iii, jay] + (rrr-1)*phi[iii]))
}
} else {
- for(rrr in 1:maxomega) {
- ind8 <- rrr <= omega
- ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1)*phi[ind8])
+ for(rrr in 1:maxyj) {
+ index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
+ if (any(index))
+ ans[index] <- ans[index] + log((1-phi[index]) *
+ probs[index, jay] + (rrr-1)*phi[index])
}
}
- sum(ans)
+ } # end of jay loop
+
+ maxomega <- max(omega)
+ loopOveri <- n < maxomega
+ if (loopOveri) {
+ for(iii in 1:n) {
+ rrr <- 1:omega[iii]
+ ans[iii]<- ans[iii] - sum(log1p(-phi[iii] + (rrr-1)*phi[iii]))
+ }
+ } else {
+ for(rrr in 1:maxomega) {
+ ind8 <- rrr <= omega
+ ans[ind8] <- ans[ind8] - log1p(-phi[ind8] + (rrr-1)*phi[ind8])
+ }
+ }
+ sum(ans)
}
}, list( .ephi = ephi, .lphi = lphi ))),
- vfamily = c("dirmultinomial "),
+ vfamily = c("dirmultinomial"),
deriv = eval(substitute(expression({
- probs <- cbind(exp(eta[,-M]), 1)
- probs <- probs / as.vector(probs %*% rep(1, M))
- phi <- eta2theta(eta[,M], .lphi, earg = .ephi )
+ probs <- cbind(exp(eta[, -M]), 1)
+ probs <- prop.table(probs, 1)
+
+ phi <- eta2theta(eta[, M], .lphi , earg = .ephi )
+
dl.dprobs <- matrix(0.0, n, M-1)
dl.dphi <- rep(0.0, length.out = n)
+
omega <- extra$n2
ycount <- as.matrix(y * c(w))
+
+ ycount <- round(ycount)
+
for(jay in 1:M) {
- maxyj <- max(ycount[,jay])
+ maxyj <- max(ycount[, jay])
loopOveri <- n < maxyj
if (loopOveri) {
for(iii in 1:n) {
- rrr <- 1:ycount[iii,jay]
- if (ycount[iii,jay] > 0) {
+ rrr <- 1:ycount[iii, jay]
+ if (ycount[iii, jay] > 0) {
PHI <- phi[iii]
dl.dphi[iii] <- dl.dphi[iii] +
- sum((rrr-1-probs[iii,jay]) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI))
+ sum((rrr-1-probs[iii, jay]) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI))
- tmp9 <- (1-PHI) / ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)
if (jay < M) {
- dl.dprobs[iii,jay] <- dl.dprobs[iii,jay] + sum(tmp9)
+ dl.dprobs[iii, jay] <- dl.dprobs[iii, jay] + sum(tmp9)
} else {
for(jay2 in 1:(M-1))
- dl.dprobs[iii,jay2]<-dl.dprobs[iii,jay2]-sum(tmp9)
+ dl.dprobs[iii, jay2]<-dl.dprobs[iii, jay2]-sum(tmp9)
}
}
}
} else {
for(rrr in 1:maxyj) {
- index <- (rrr <= ycount[,jay]) & (ycount[,jay] > 0)
+ index <- (rrr <= ycount[, jay]) & (ycount[, jay] > 0)
PHI <- phi[index]
dl.dphi[index] <- dl.dphi[index] +
- (rrr-1-probs[index,jay]) / ((1-PHI)*probs[index,jay] +
+ (rrr-1-probs[index, jay]) / ((1-PHI)*probs[index, jay] +
(rrr-1)*PHI)
- tmp9 <- (1-PHI) / ((1-PHI)*probs[index,jay] + (rrr-1)*PHI)
+ tmp9 <- (1-PHI) / ((1-PHI)*probs[index, jay] + (rrr-1)*PHI)
if (jay < M) {
- dl.dprobs[index,jay] <- dl.dprobs[index,jay] + tmp9
+ dl.dprobs[index, jay] <- dl.dprobs[index, jay] + tmp9
} else {
for(jay2 in 1:(M-1))
- dl.dprobs[index,jay2] <- dl.dprobs[index,jay2] - tmp9
+ dl.dprobs[index, jay2] <- dl.dprobs[index, jay2] - tmp9
}
}
}
@@ -495,89 +570,97 @@ rhzeta = function(n, alpha)
} else {
for(rrr in 1:maxomega) {
index <- rrr <= omega
- dl.dphi[index]<-dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
+ dl.dphi[index] <-
+ dl.dphi[index] - (rrr-2)/(1 + (rrr-2)*phi[index])
}
}
- dprobs.deta <- probs[,-M] * (1 - probs[,-M]) # n x (M-1)
- dphi.deta <- dtheta.deta(phi, .lphi, earg = .ephi )
+
+ dprobs.deta <- probs[, -M] * (1 - probs[, -M]) # n x (M-1)
+ dphi.deta <- dtheta.deta(phi, .lphi , earg = .ephi )
+
ans <- cbind(dl.dprobs * dprobs.deta,
dl.dphi * dphi.deta)
ans
}), list( .ephi = ephi, .lphi = lphi ))),
weight = eval(substitute(expression({
wz <- matrix(0, n, dimm(M))
- loopOveri <- n < maxomega
+ loopOveri <- (n < maxomega)
if (loopOveri) {
for(iii in 1:n) {
rrr <- 1:omega[iii] # A vector
PHI <- phi[iii]
- pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[iii],
- shape1<-probs[iii,M]*(1/PHI-1),
- shape2<-(1-probs[iii,M])*(1/PHI-1)) # A vector
- denomM <- ((1-PHI)*probs[iii,M] + (rrr-1)*PHI)^2 # A vector
- wz[iii,iam(M,M,M)] <- wz[iii,iam(M,M,M)] +
- sum(probs[iii,M]^2 * pYiM.ge.rrr / denomM) -
+ pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+ size = omega[iii],
+ shape1<-probs[iii, M]*(1/PHI-1),
+ shape2<-(1-probs[iii, M])*(1/PHI-1)) # A vector
+ denomM <- ((1-PHI)*probs[iii, M] + (rrr-1)*PHI)^2 # A vector
+ wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
+ sum(probs[iii, M]^2 * pYiM.ge.rrr / denomM) -
sum(1 / (1 + (rrr-2)*PHI)^2)
for(jay in 1:(M-1)) {
- denomj <- ((1-PHI)*probs[iii,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[iii],
- shape1<-probs[iii,jay]*(1/PHI-1),
- shape2<-(1-probs[iii,jay])*(1/PHI-1))
- wz[iii,iam(jay,jay,M)] <- wz[iii,iam(jay,jay,M)] +
+ denomj <- ((1-PHI)*probs[iii, jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+ size = omega[iii],
+ shape1<-probs[iii, jay]*(1/PHI-1),
+ shape2<-(1-probs[iii, jay])*(1/PHI-1))
+ wz[iii, iam(jay, jay, M)] <- wz[iii, iam(jay, jay, M)] +
sum(pYij.ge.rrr / denomj) +
sum(pYiM.ge.rrr / denomM)
for(kay in jay:(M-1)) if (kay > jay) {
- wz[iii,iam(jay,kay,M)] <- wz[iii,iam(jay,kay,M)] +
- sum(pYiM.ge.rrr / denomM)
+ wz[iii, iam(jay, kay, M)] <- wz[iii, iam(jay, kay, M)] +
+ sum(pYiM.ge.rrr / denomM)
}
- wz[iii,iam(jay,M,M)] <- wz[iii,iam(jay,M,M)] +
- sum(probs[iii,jay] * pYij.ge.rrr / denomj) -
- sum(probs[iii,M] * pYiM.ge.rrr / denomM)
- wz[iii,iam(M,M,M)] <- wz[iii,iam(M,M,M)] +
- sum(probs[iii,jay]^2 * pYij.ge.rrr / denomj)
+ wz[iii, iam(jay, M, M)] <- wz[iii, iam(jay, M, M)] +
+ sum(probs[iii, jay] * pYij.ge.rrr / denomj) -
+ sum(probs[iii, M] * pYiM.ge.rrr / denomM)
+ wz[iii, iam(M, M, M)] <- wz[iii, iam(M, M, M)] +
+ sum(probs[iii, jay]^2 * pYij.ge.rrr / denomj)
} # end of jay loop
} # end of iii loop
} else {
for(rrr in 1:maxomega) {
ind5 <- rrr <= omega
PHI <- phi[ind5]
- pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[ind5],
- shape1<-probs[ind5,M]*(1/PHI-1),
- shape2<-(1-probs[ind5,M])*(1/PHI-1))
- denomM <- ((1-PHI)*probs[ind5,M] + (rrr-1)*PHI)^2
- wz[ind5,iam(M,M,M)] <- wz[ind5,iam(M,M,M)] +
- probs[ind5,M]^2 * pYiM.ge.rrr / denomM -
+ pYiM.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+ size = omega[ind5],
+ shape1<-probs[ind5, M]*(1/PHI-1),
+ shape2<-(1-probs[ind5, M])*(1/PHI-1))
+ denomM <- ((1-PHI)*probs[ind5, M] + (rrr-1)*PHI)^2
+ wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
+ probs[ind5, M]^2 * pYiM.ge.rrr / denomM -
1 / (1 + (rrr-2)*PHI)^2
for(jay in 1:(M-1)) {
- denomj <- ((1-PHI)*probs[ind5,jay] + (rrr-1)*PHI)^2
- pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1, size=omega[ind5],
- shape1<-probs[ind5,jay]*(1/PHI-1),
- shape2<-(1-probs[ind5,jay])*(1/PHI-1))
- wz[ind5,iam(jay,jay,M)] <- wz[ind5,iam(jay,jay,M)] +
+ denomj <- ((1-PHI)*probs[ind5, jay] + (rrr-1)*PHI)^2
+ pYij.ge.rrr <- 1 - pbetabinom.ab(q = rrr-1,
+ size = omega[ind5],
+ shape1<-probs[ind5, jay]*(1/PHI-1),
+ shape2<-(1-probs[ind5, jay])*(1/PHI-1))
+ wz[ind5, iam(jay, jay, M)] <- wz[ind5, iam(jay, jay, M)] +
pYij.ge.rrr / denomj + pYiM.ge.rrr / denomM
for(kay in jay:(M-1)) if (kay > jay) {
- wz[ind5,iam(jay,kay,M)] <- wz[ind5,iam(jay,kay,M)] +
- pYiM.ge.rrr / denomM
+ wz[ind5, iam(jay, kay, M)] <- wz[ind5, iam(jay, kay, M)] +
+ pYiM.ge.rrr / denomM
}
- wz[ind5,iam(jay,M,M)] <- wz[ind5,iam(jay,M,M)] +
- probs[ind5,jay] * pYij.ge.rrr / denomj -
- probs[ind5,M] * pYiM.ge.rrr / denomM
- wz[ind5,iam(M,M,M)] <- wz[ind5,iam(M,M,M)] +
- probs[ind5,jay]^2 * pYij.ge.rrr / denomj
+ wz[ind5, iam(jay, M, M)] <- wz[ind5, iam(jay, M, M)] +
+ probs[ind5, jay] * pYij.ge.rrr / denomj -
+ probs[ind5, M] * pYiM.ge.rrr / denomM
+ wz[ind5, iam(M, M, M)] <- wz[ind5, iam(M, M, M)] +
+ probs[ind5, jay]^2 * pYij.ge.rrr / denomj
} # end of jay loop
} # end of rrr loop
}
for(jay in 1:(M-1))
- for(kay in jay:(M-1))
- wz[,iam(jay,kay,M)] <- wz[,iam(jay,kay,M)] * (1-phi)^2
+ for(kay in jay:(M-1))
+ wz[, iam(jay, kay, M)] <- wz[, iam(jay, kay, M)] * (1-phi)^2
for(jay in 1:(M-1))
- wz[,iam(jay,M,M)] <- wz[,iam(jay,M,M)] * (phi-1) / phi
- wz[,iam(M,M,M)] <- wz[,iam(M,M,M)] / phi^2
+ wz[, iam(jay, M, M)] <- wz[, iam(jay, M, M)] * (phi-1) / phi
+ wz[, iam(M, M, M)] <- wz[, iam(M, M, M)] / phi^2
- d1Thetas.deta <- cbind(dprobs.deta, dphi.deta)
+ d1Thetas.deta <- cbind(dprobs.deta,
+ dphi.deta)
index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
- wz <- wz * d1Thetas.deta[,index$row] * d1Thetas.deta[,index$col]
+ wz <- wz * d1Thetas.deta[, index$row] * d1Thetas.deta[, index$col]
wz
}), list( .ephi = ephi, .lphi = lphi ))))
}
@@ -586,12 +669,14 @@ rhzeta = function(n, alpha)
-dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
- parallel = FALSE, zero = NULL)
+dirmul.old <- function(link = "loge", init.alpha = 0.01,
+ parallel = FALSE, zero = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
@@ -600,89 +685,96 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
if (!is.Numeric(init.alpha, positive = TRUE))
stop("'init.alpha' must contain positive values only")
- if (!is.list(earg))
- earg = list()
- new("vglmff",
- blurb = c("Dirichlet-Multinomial distribution\n\n",
- "Links: ",
- namesof("shape1", link, earg = earg), ", ..., ",
- namesof("shapeM", link, earg = earg), "\n\n",
- "Posterior mean: (n_j + shape_j)/(2*sum(n_j) + sum(shape_j))\n"),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel,
- constraints, intercept.apply = TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- initialize = eval(substitute(expression({
- y = as.matrix(y)
- M = ncol(y)
- if (any(y != round(y )))
- stop("all y values must be integer-valued")
-
- predictors.names = namesof(paste("shape", 1:M, sep = ""),
- .link, earg = .earg, short = TRUE)
- extra$n2 = rowSums(y) # Nb. don't multiply by 2
- extra$y = y
- if (!length(etastart)) {
- yy = if (is.numeric( .init.alpha))
- matrix( .init.alpha, n, M, byrow= TRUE) else
- matrix(runif(n*M), n, M)
- etastart = theta2eta(yy, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .init.alpha=init.alpha ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .link, earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- (extra$y + shape) / (extra$n2 + sumshape)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = rep( .link, length = M)
- names(misc$link) = paste("shape", 1:M, sep = "")
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
- misc$pooled.weight = pooled.weight
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .link, earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
- sum(w * (lgamma(y + shape) - lgamma(shape )))
- }, list( .link = link, .earg = earg ))),
- vfamily = c("dirmul.old"),
- deriv = eval(substitute(expression({
- shape = eta2theta(eta, .link, earg = .earg )
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
- digamma(y + shape) - digamma(shape)
- dsh.deta = dtheta.deta(shape, .link, earg = .earg )
- c(w) * dl.dsh * dsh.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- index = iam(NA, NA, M, both = TRUE, diag = TRUE)
- wz = matrix(trigamma(sumshape)-trigamma(extra$n2 + sumshape),
- nrow=n, ncol=dimm(M))
- wz[, 1:M] = wz[, 1:M] + trigamma(y + shape) - trigamma(shape)
- wz = -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
-
-
- if (TRUE && intercept.only) {
- sumw = sum(w)
- for(ii in 1:ncol(wz))
- wz[,ii] = sum(wz[,ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
+ new("vglmff",
+ blurb = c("Dirichlet-Multinomial distribution\n\n",
+ "Links: ",
+ namesof("shape1", link, earg = earg), ", ..., ",
+ namesof("shapeM", link, earg = earg), "\n\n",
+ "Posterior mean: (n_j + shape_j)/(2*sum(n_j) + ",
+ "sum(shape_j))\n"),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel ,
+ constraints, intercept.apply = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y = as.matrix(y)
+ M = ncol(y)
+ if (any(y != round(y )))
+ stop("all y values must be integer-valued")
- wz
- }), list( .link = link, .earg = earg ))))
+ predictors.names <- namesof(paste("shape", 1:M, sep = ""),
+ .link , earg = .earg , short = TRUE)
+
+ extra$n2 = rowSums(y) # Nb. don't multiply by 2
+ extra$y = y
+
+ if (!length(etastart)) {
+ yy = if (is.numeric( .init.alpha))
+ matrix( .init.alpha, n, M, byrow = TRUE) else
+ matrix(runif(n*M), n, M)
+ etastart <- theta2eta(yy, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .init.alpha = init.alpha ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .link , earg = .earg )
+ M = if (is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
+ (extra$y + shape) / (extra$n2 + sumshape)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = rep( .link , length = M)
+ names(misc$link) = paste("shape", 1:M, sep = "")
+
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] = .earg
+
+ misc$pooled.weight = pooled.weight
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta, .link , earg = .earg )
+ M = if (is.matrix(eta)) ncol(eta) else 1
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (lgamma(sumshape) - lgamma(extra$n2 + sumshape ))) +
+ sum(c(w) * (lgamma(y + shape) - lgamma(shape )))
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("dirmul.old"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta, .link , earg = .earg )
+
+ sumshape = as.vector(shape %*% rep(1, length.out = M))
+ dl.dsh = digamma(sumshape) - digamma(extra$n2 + sumshape) +
+ digamma(y + shape) - digamma(shape)
+
+ dsh.deta = dtheta.deta(shape, .link , earg = .earg )
+
+ c(w) * dl.dsh * dsh.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ index = iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz = matrix(trigamma(sumshape) - trigamma(extra$n2 + sumshape),
+ nrow = n, ncol = dimm(M))
+ wz[, 1:M] = wz[, 1:M] + trigamma(y + shape) - trigamma(shape)
+ wz = -wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+
+
+ if (TRUE && intercept.only) {
+ sumw = sum(w)
+ for(ii in 1:ncol(wz))
+ wz[, ii] = sum(wz[, ii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .link = link, .earg = earg ))))
}
@@ -690,7 +782,7 @@ dirmul.old = function(link = "loge", earg = list(), init.alpha = 0.01,
-rdiric = function(n, shape, dimension = NULL) {
+rdiric <- function(n, shape, dimension = NULL) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -713,148 +805,167 @@ rdiric = function(n, shape, dimension = NULL) {
- dirichlet = function(link = "loge", earg = list(),
- parallel = FALSE, zero = NULL)
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(earg)) earg = list()
-
- new("vglmff",
- blurb = c("Dirichlet distribution\n\n",
- "Links: ",
- namesof("shapej", link, earg = earg), "\n\n",
- "Mean: shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints, int= TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- initialize = eval(substitute(expression({
- y = as.matrix(y)
- M = ncol(y)
- if (any(y <= 0) || any(y>=1))
- stop("all y values must be > 0 and < 1")
- predictors.names = namesof(paste("shape", 1:M, sep = ""), .link,
- earg = .earg, short = TRUE)
- if (!length(etastart)) {
- yy = matrix(t(y) %*% rep(1/nrow(y), nrow(y)), nrow(y), M,
- byrow= TRUE)
- etastart = theta2eta(yy, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .link, earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = rowSums(shape)
- shape / sumshape
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(shape = .link)
- temp.names = paste("shape", 1:M, sep = "")
- misc$link = rep( .link, length.out = M)
- names(misc$link) = temp.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
- misc$expected = TRUE
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .link, earg = .earg )
- M = if (is.matrix(eta)) ncol(eta) else 1
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(c(w) * lgamma(sumshape)) -
- sum(c(w) * lgamma(shape)) +
- sum(c(w) * (shape-1) * log(y))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("dirichlet"),
- deriv = eval(substitute(expression({
- shape = eta2theta(eta, .link, earg = .earg )
- sumshape = as.vector(shape %*% rep(1, length.out = M))
- dl.dsh = digamma(sumshape) - digamma(shape) + log(y)
- dsh.deta = dtheta.deta(shape, .link, earg = .earg )
- c(w) * dl.dsh * dsh.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- index = iam(NA, NA, M, both = TRUE, diag = TRUE)
- wz = matrix(trigamma(sumshape), nrow=n, ncol=dimm(M))
- wz[, 1:M] = wz[, 1:M] - trigamma(shape)
- wz = -c(w) * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
- wz
- }))
-}
+ dirichlet <- function(link = "loge", parallel = FALSE, zero = NULL) {
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- zeta = function(x, deriv = 0) {
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- deriv.arg = deriv
- rm(deriv)
- if (!is.Numeric(deriv.arg, allowable.length = 1,
- integer.valued = TRUE))
- stop("'deriv' must be a single non-negative integer")
- if (deriv.arg < 0 || deriv.arg > 2)
- stop("'deriv' must be 0, 1, or 2")
+ new("vglmff",
+ blurb = c("Dirichlet distribution\n\n",
+ "Links: ",
+ namesof("shapej", link, earg = earg), "\n\n",
+ "Mean: shape_j/(1 + sum(shape_j)), j = 1,..,ncol(y)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.vgam(matrix(1, M, 1), x, .parallel ,
+ constraints, int= TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ y <- as.matrix(y)
+ M <- ncol(y)
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1,
+ ncol.y.max = Inf,
+ out.wy = FALSE,
+ colsyperw = NULL,
+ maximize = FALSE)
- if (deriv.arg > 0)
- return(Zeta.derivative(x, deriv.arg = deriv.arg))
+ if (any(y <= 0) || any(y >= 1))
+ stop("all y values must be > 0 and < 1")
+ mynames1 <- paste("shape", 1:M, sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , short = TRUE)
+ if (!length(etastart)) {
+ yy <- matrix(t(y) %*% rep(1 / nrow(y), nrow(y)), nrow(y), M,
+ byrow = TRUE)
+ etastart <- theta2eta(yy, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape <- eta2theta(eta, .link , earg = .earg )
+ prop.table(shape, 1)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- rep( .link , length.out = M)
+ names(misc$link) <- mynames1
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
- if (any(special <- Re(x) <= 1)) {
- ans <- x
- ans[special] <- Inf # For Re(x) == 1
+ misc$expected <- TRUE
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape <- eta2theta(eta, .link , earg = .earg )
+ M <- if (is.matrix(eta)) ncol(eta) else 1
+ sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * lgamma(sumshape)) -
+ sum(c(w) * lgamma(shape)) +
+ sum(c(w) * (shape-1) * log(y))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("dirichlet"),
+ deriv = eval(substitute(expression({
+ shape <- eta2theta(eta, .link , earg = .earg )
- special3 <- Re(x) < 1
- ans[special3] <- NA # For 0 < Re(x) < 1
+ sumshape <- as.vector(shape %*% rep(1, length.out = M))
+ dl.dsh <- digamma(sumshape) - digamma(shape) + log(y)
- special4 <- (0 < Re(x)) & (Re(x) < 1) & (Im(x) == 0)
- ans[special4] <- Zeta.derivative(x[special4], deriv.arg = deriv.arg)
+ dsh.deta <- dtheta.deta(shape, .link , earg = .earg )
+ c(w) * dl.dsh * dsh.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ index <- iam(NA, NA, M, both = TRUE, diag = TRUE)
+ wz <- matrix(trigamma(sumshape), nrow = n, ncol = dimm(M))
+ wz[, 1:M] <- wz[, 1:M] - trigamma(shape)
+ wz <- -c(w) * wz * dsh.deta[, index$row] * dsh.deta[, index$col]
+ wz
+ }))
+}
- special2 <- Re(x) < 0
- if (any(special2)) {
- x2 = x[special2]
- cx = 1-x2
- ans[special2] = 2^(x2) * pi^(x2-1) * sin(pi*x2/2) * gamma(cx) * Recall(cx)
- }
- if (any(!special)) {
- ans[!special] <- Recall(x[!special])
- }
- return(ans)
- }
- a = 12; k = 8
- B = c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
- ans = 0
- for(ii in 1:(a-1))
- ans = ans + 1.0 / ii^x
- ans = ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
- term = (x/2) / a^(x+1)
- ans = ans + term * B[1]
+ zeta <- function(x, deriv = 0) {
- for(mm in 2:k) {
- term = term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
- ans = ans + term * B[mm]
+
+
+ deriv.arg = deriv
+ rm(deriv)
+ if (!is.Numeric(deriv.arg, allowable.length = 1,
+ integer.valued = TRUE))
+ stop("'deriv' must be a single non-negative integer")
+ if (deriv.arg < 0 || deriv.arg > 2)
+ stop("'deriv' must be 0, 1, or 2")
+
+
+ if (deriv.arg > 0)
+ return(Zeta.derivative(x, deriv.arg = deriv.arg))
+
+
+
+ if (any(special <- Re(x) <= 1)) {
+ ans <- x
+ ans[special] <- Inf # For Re(x) == 1
+
+ special3 <- Re(x) < 1
+ ans[special3] <- NA # For 0 < Re(x) < 1
+
+ special4 <- (0 < Re(x)) & (Re(x) < 1) & (Im(x) == 0)
+ ans[special4] <- Zeta.derivative(x[special4], deriv.arg = deriv.arg)
+
+
+ special2 <- Re(x) < 0
+ if (any(special2)) {
+ x2 = x[special2]
+ cx = 1-x2
+ ans[special2] = 2^(x2) * pi^(x2-1) * sin(pi*x2/2) *
+ gamma(cx) * Recall(cx)
}
- ans
+
+ if (any(!special)) {
+ ans[!special] <- Recall(x[!special])
+ }
+ return(ans)
+ }
+
+ a = 12; k = 8
+ B = c(1/6, -1/30,1/42,-1/30,5/66,-691/2730,7/6,-3617/510)
+ ans = 0
+ for(ii in 1:(a-1))
+ ans = ans + 1.0 / ii^x
+ ans = ans + 1.0 / ((x-1.0)* a^(x-1.0)) + 1.0 / (2.0 * a^x)
+
+ term = (x/2) / a^(x+1)
+ ans = ans + term * B[1]
+
+ for(mm in 2:k) {
+ term = term * (x+2*mm-2) * (x+2*mm-3) / (a * a * 2 * mm * (2*mm-1))
+ ans = ans + term * B[mm]
+ }
+ ans
}
- Zeta.derivative = function(x, deriv.arg = 0)
+ Zeta.derivative <- function(x, deriv.arg = 0)
{
@@ -886,11 +997,12 @@ rdiric = function(n, shape, dimension = NULL) {
-dzeta = function(x, p, log = FALSE)
+dzeta <- function(x, p, log = FALSE)
{
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
if (!is.Numeric(p, positive = TRUE)) # || min(p) <= 1
stop("'p' must be numeric and > 0")
@@ -913,85 +1025,136 @@ dzeta = function(x, p, log = FALSE)
ans
}
- zetaff = function(link = "loge", earg = list(), init.p = NULL)
+
+ zetaff <- function(link = "loge", init.p = NULL, zero = NULL)
{
- if (length(init.p) && !is.Numeric(init.p, positive = TRUE))
- stop("argument 'init.p' must be > 0")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
+ if (length(init.p) && !is.Numeric(init.p, positive = TRUE))
+ stop("argument 'init.p' must be > 0")
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("Zeta distribution ",
- "f(y) = 1/(y^(p+1) zeta(p+1)), p>0, y = 1,2,..\n\n",
- "Link: ",
- namesof("p", link, earg = earg), "\n\n",
- "Mean: zeta(p) / zeta(p+1), provided p>1\n",
- "Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
- initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (any(y < 1))
- stop("all y values must be in 1,2,3,...")
- if (any(y != round(y )))
- warning("'y' should be integer-valued")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("p", .link, earg = .earg, tag = FALSE)
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
- if (!length(etastart)) {
- zetaff.Loglikfun = function(pp, y, x, w, extraargs) {
- sum(w * dzeta(x = y, p = pp, log = TRUE))
- }
- p.grid = seq(0.1, 3.0, length.out = 19)
- pp.init = if (length( .init.p )) .init.p else
- getMaxMin(p.grid, objfun = zetaff.Loglikfun,
- y = y, x = x, w = w)
- pp.init = rep(pp.init, length = length(y))
- if ( .link == "loglog") pp.init[pp.init <= 1] = 1.2
- etastart = theta2eta(pp.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .init.p = init.p ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- ans <- pp <- eta2theta(eta, .link, earg = .earg )
- ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
- ans[pp <= 1] <- NA
- ans
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link <- c(pp = .link)
- misc$earg <- list(pp = .earg )
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- pp = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzeta(x = y, p = pp, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("zetaff"),
- deriv = eval(substitute(expression({
- pp = eta2theta(eta, .link, earg = .earg )
- fred1 = zeta(pp+1)
- fred2 = zeta(pp+1, deriv=1)
- dl.dpp = -log(y) - fred2 / fred1
- dpp.deta = dtheta.deta(pp, .link, earg = .earg )
- c(w) * dl.dpp * dpp.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- ed2l.dpp2 = zeta(pp+1, deriv=2) / fred1 - (fred2/fred1)^2
- wz = c(w) * dpp.deta^2 * ed2l.dpp2
- wz
- }))
+ new("vglmff",
+ blurb = c("Zeta distribution ",
+ "f(y) = 1/(y^(p+1) zeta(p+1)), p>0, y = 1, 2,..\n\n",
+ "Link: ",
+ namesof("p", link, earg = earg), "\n\n",
+ "Mean: zeta(p) / zeta(p+1), provided p>1\n",
+ "Variance: zeta(p-1) / zeta(p+1) - mean^2, provided p>2"),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ multipleResponses = TRUE,
+ zero = .zero ,
+ link = .link)
+ }, list( .link = link,
+ .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ Is.positive.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+
+ mynames1 <- paste("p", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ if (!length(etastart)) {
+ zetaff.Loglikfun <- function(pp, y, x, w, extraargs) {
+ sum(c(w) * dzeta(x = y, p = pp, log = TRUE))
+ }
+
+
+ p.grid <- seq(0.1, 3.0, length.out = 19)
+ pp.init <- matrix( if (length( .init.p )) .init.p else -1,
+ n, M, byrow = TRUE)
+ if (!length( .init.p ))
+ for (spp. in 1:ncoly) {
+ pp.init[, spp.] <- getMaxMin(p.grid, objfun = zetaff.Loglikfun,
+ y = y[, spp.], x = x, w = w[, spp.])
+ if ( .link == "loglog")
+ pp.init[pp.init <= 1, spp.] <- 1.2
+ }
+
+ etastart <- theta2eta(pp.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .init.p = init.p ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ ans <- pp <- eta2theta(eta, .link , earg = .earg )
+ ans[pp > 1] <- zeta(pp[pp > 1]) / zeta(pp[pp > 1] + 1)
+ ans[pp <= 1] <- NA
+ ans
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+
+ misc$link <- rep( .link , length = ncoly)
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$multipleResponses <- TRUE
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ pp = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dzeta(x = y, p = pp, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("zetaff"),
+ deriv = eval(substitute(expression({
+ pp = eta2theta(eta, .link , earg = .earg )
+
+ fred1 = zeta(pp+1)
+ fred2 = zeta(pp+1, deriv = 1)
+ dl.dpp = -log(y) - fred2 / fred1
+
+ dpp.deta = dtheta.deta(pp, .link , earg = .earg )
+
+ c(w) * dl.dpp * dpp.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ NOS <- ncol(y)
+ nd2l.dpp2 <- zeta(pp + 1, deriv = 2) / fred1 - (fred2/fred1)^2
+ wz <- nd2l.dpp2 * dpp.deta^2
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
+ }))
}
-gharmonic = function(n, s = 1, lognexponent = 0) {
+gharmonic <- function(n, s = 1, lognexponent = 0) {
if (!is.Numeric(n, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'n'")
@@ -1014,18 +1177,20 @@ gharmonic = function(n, s = 1, lognexponent = 0) {
}
}
-dzipf = function(x, N, s, log = FALSE)
+
+dzipf <- function(x, N, s, log = FALSE)
{
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
if (!is.Numeric(x))
- stop("bad input for argument 'x'")
+ stop("bad input for argument 'x'")
if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'N'")
+ stop("bad input for argument 'N'")
if (!is.Numeric(s, positive = TRUE))
- stop("bad input for argument 's'")
+ stop("bad input for argument 's'")
nn = max(length(x), length(N), length(s))
x = rep(x, length.out = nn);
N = rep(N, length.out = nn);
@@ -1045,7 +1210,7 @@ dzipf = function(x, N, s, log = FALSE)
-pzipf = function(q, N, s) {
+pzipf <- function(q, N, s) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(N, integer.valued = TRUE, positive = TRUE))
@@ -1069,92 +1234,94 @@ pzipf = function(q, N, s) {
}
- zipf = function(N = NULL, link = "loge", earg = list(), init.s = NULL)
-{
- if (length(N) &&
- (!is.Numeric(N, positive = TRUE,
- integer.valued = TRUE, allowable.length = 1) ||
- N <= 1))
- stop("bad input for argument 'N'")
- enteredN = length(N)
- if (length(init.s) && !is.Numeric(init.s, positive = TRUE))
- stop("argument 'init.s' must be > 0")
+ zipf <- function(N = NULL, link = "loge", init.s = NULL) {
+ if (length(N) &&
+ (!is.Numeric(N, positive = TRUE,
+ integer.valued = TRUE, allowable.length = 1) ||
+ N <= 1))
+ stop("bad input for argument 'N'")
+ enteredN = length(N)
+ if (length(init.s) && !is.Numeric(init.s, positive = TRUE))
+ stop("argument 'init.s' must be > 0")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- new("vglmff",
- blurb = c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
- " s > 0, y = 1,2,...,N",
- ifelse(enteredN, paste(" = ",N,sep = ""), ""),
- "\n\n",
- "Link: ",
- namesof("s", link, earg = earg),
- "\n\n",
- "Mean: gharmonic(N,s-1) / gharmonic(N,s)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- y = as.numeric(y)
- if (any(y != round(y )))
- stop("y must be integer-valued")
- predictors.names = namesof("s", .link, earg = .earg, tag = FALSE)
- NN = .N
- if (!is.Numeric(NN, allowable.length = 1,
- positive = TRUE, integer.valued = TRUE))
- NN = max(y)
- if (max(y) > NN)
- stop("maximum of the response is greater than argument 'N'")
- if (any(y < 1))
- stop("all response values must be in 1,2,3,...,N( = ", NN,")")
- extra$N = NN
- if (!length(etastart)) {
- llfun = function(ss, y, N, w) {
- sum(w * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
- }
- ss.init = if (length( .init.s )) .init.s else
- getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
- llfun=llfun,
- y = y, N=extra$N, w = w)
- ss.init = rep(ss.init, length = length(y))
- if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
- etastart = theta2eta(ss.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .init.s = init.s, .N = N ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- ss = eta2theta(eta, .link, earg = .earg )
- gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$expected = FALSE
- misc$link = c(s = .link)
- misc$earg = list(s = .earg )
- misc$N = extra$N
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- ss = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
+
+ new("vglmff",
+ blurb = c("Zipf distribution f(y;s) = y^(-s) / sum((1:N)^(-s)),",
+ " s > 0, y = 1, 2,...,N",
+ ifelse(enteredN, paste(" = ",N,sep = ""), ""),
+ "\n\n",
+ "Link: ",
+ namesof("s", link, earg = earg),
+ "\n\n",
+ "Mean: gharmonic(N,s-1) / gharmonic(N,s)"),
+ initialize = eval(substitute(expression({
+
+
+ w.y.check(w = w, y = y,
+ Is.integer.y = TRUE)
+
+
+ predictors.names <- namesof("s", .link , earg = .earg , tag = FALSE)
+
+ NN = .N
+ if (!is.Numeric(NN, allowable.length = 1,
+ positive = TRUE, integer.valued = TRUE))
+ NN = max(y)
+ if (max(y) > NN)
+ stop("maximum of the response is greater than argument 'N'")
+ if (any(y < 1))
+ stop("all response values must be in 1, 2, 3,...,N( = ", NN,")")
+ extra$N = NN
+ if (!length(etastart)) {
+ llfun <- function(ss, y, N, w) {
+ sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
}
- }, list( .link = link, .earg = earg ))),
- vfamily = c("zipf"),
- deriv = eval(substitute(expression({
- ss = eta2theta(eta, .link, earg = .earg )
- fred1 = gharmonic(extra$N, ss)
- fred2 = gharmonic(extra$N, ss, lognexp=1)
- dl.dss = -log(y) + fred2 / fred1
- dss.deta = dtheta.deta(ss, .link, earg = .earg )
- d2ss.deta2 = d2theta.deta2(ss, .link, earg = .earg )
- c(w) * dl.dss * dss.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- d2l.dss = gharmonic(extra$N, ss, lognexp=2) / fred1 - (fred2/fred1)^2
- wz = c(w) * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
- wz
- }))
+ ss.init = if (length( .init.s )) .init.s else
+ getInitVals(gvals = seq(0.1, 3.0, length.out = 19),
+ llfun=llfun,
+ y = y, N=extra$N, w = w)
+ ss.init = rep(ss.init, length = length(y))
+ if ( .link == "loglog") ss.init[ss.init <= 1] = 1.2
+ etastart <- theta2eta(ss.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .init.s = init.s, .N = N ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ ss = eta2theta(eta, .link , earg = .earg )
+ gharmonic(extra$N, s=ss - 1) / gharmonic(extra$N, s=ss)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$expected <- FALSE
+ misc$link <- c(s = .link)
+ misc$earg <- list(s = .earg )
+ misc$N <- extra$N
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ ss = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dzipf(x = y, N=extra$N, s=ss, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("zipf"),
+ deriv = eval(substitute(expression({
+ ss = eta2theta(eta, .link , earg = .earg )
+ fred1 = gharmonic(extra$N, ss)
+ fred2 = gharmonic(extra$N, ss, lognexp = 1)
+ dl.dss = -log(y) + fred2 / fred1
+ dss.deta = dtheta.deta(ss, .link , earg = .earg )
+ d2ss.deta2 = d2theta.deta2(ss, .link , earg = .earg )
+ c(w) * dl.dss * dss.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ d2l.dss = gharmonic(extra$N, ss, lognexp = 2) / fred1 - (fred2/fred1)^2
+ wz = c(w) * (dss.deta^2 * d2l.dss - d2ss.deta2 * dl.dss)
+ wz
+ }))
}
@@ -1164,78 +1331,92 @@ cauchy.control <- function(save.weight = TRUE, ...)
list(save.weight = save.weight)
}
- cauchy = function(llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
- ilocation = NULL, iscale = NULL,
- iprobs = seq(0.2, 0.8, by=0.2),
- imethod = 1, nsimEIM = NULL, zero = 2)
+
+ cauchy <- function(llocation = "identity", lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ iprobs = seq(0.2, 0.8, by=0.2),
+ imethod = 1, nsimEIM = NULL, zero = 2)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(nsimEIM) &&
- (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
- nsimEIM <= 50))
- stop("argument 'nsimEIM' should be an integer greater than 50")
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1)
- stop("bad input for argument 'iprobs'")
- new("vglmff",
- blurb = c("Two parameter Cauchy distribution ",
- "(location & scale unknown)\n\n",
- "Link: ",
- namesof("location", llocation, earg = elocation), "\n",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: NA\n",
- "Variance: NA"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names = c(
- namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
- if (!length(etastart)) {
- loc.init = if (length( .ilocation)) .ilocation else {
- if ( .imethod == 2) median(rep(y, w)) else
- if ( .imethod == 3) y else {
- cauchy2.Loglikfun = function(loc, y, x, w, extraargs) {
- iprobs = .iprobs
- qy = quantile(rep(y, w), probs=iprobs)
- ztry = tan(pi*(iprobs-0.5))
- btry = (qy - loc) / ztry
- scal = median(btry, na.rm = TRUE)
- if (scal <= 0) scal = 0.1
- sum(w * dcauchy(x = y, loc=loc, scale=scal, log = TRUE))
- }
- loc.grid = c(quantile(y, probs=seq(0.1, 0.9, by=0.05)))
- try.this = getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
- y = y, x = x, w = w)
- try.this = rep(c(try.this), length.out = n)
- try.this
- }
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(nsimEIM) &&
+ (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE) ||
+ nsimEIM <= 50))
+ stop("argument 'nsimEIM' should be an integer greater than 50")
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+ if (!is.Numeric(iprobs, positive = TRUE) || max(iprobs) >= 1)
+ stop("bad input for argument 'iprobs'")
+
+
+
+ new("vglmff",
+ blurb = c("Two parameter Cauchy distribution ",
+ "(location & scale unknown)\n\n",
+ "Link: ",
+ namesof("location", llocat, earg = elocat), "\n",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: NA\n",
+ "Variance: NA"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ predictors.names <- c(
+ namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+
+
+ w.y.check(w = w, y = y)
+
+
+
+ if (!length(etastart)) {
+ loc.init = if (length( .ilocat)) .ilocat else {
+ if ( .imethod == 2) median(rep(y, w)) else
+ if ( .imethod == 3) y else {
+ cauchy2.Loglikfun <- function(loc, y, x, w, extraargs) {
+ iprobs = .iprobs
+ qy = quantile(rep(y, w), probs = iprobs)
+ ztry = tan(pi*(iprobs-0.5))
+ btry = (qy - loc) / ztry
+ scal = median(btry, na.rm = TRUE)
+ if (scal <= 0) scal = 0.1
+ sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
+ log = TRUE))
+ }
+ loc.grid = c(quantile(y, probs = seq(0.1, 0.9, by=0.05)))
+ try.this = getMaxMin(loc.grid, objfun = cauchy2.Loglikfun,
+ y = y, x = x, w = w)
+ try.this = rep(c(try.this), length.out = n)
+ try.this
}
- loc.init = rep(c(loc.init), length.out = n)
+ }
+ loc.init = rep(c(loc.init), length.out = n)
- sca.init = if (length( .iscale)) .iscale else {
+ sca.init = if (length( .iscale )) .iscale else {
iprobs = .iprobs
- qy = quantile(rep(y, w), probs=iprobs)
+ qy = quantile(rep(y, w), probs = iprobs)
ztry = tan(pi*(iprobs-0.5))
btry = (qy - loc.init[1]) / ztry
sca.init = median(btry, na.rm = TRUE)
@@ -1244,49 +1425,50 @@ cauchy.control <- function(save.weight = TRUE, ...)
}
sca.init = rep(c(sca.init), length.out = n)
- if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
- etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
- theta2eta(sca.init, .lscale, earg = .escale))
+ if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(sca.init, .lscale , earg = .escale ))
}
- }), list( .ilocation = ilocation,
- .elocation = elocation, .llocation = llocation,
+ }), list( .ilocat = ilocat,
+ .elocat = elocat, .llocat = llocat,
.iscale = iscale, .escale = escale, .lscale = lscale,
- .iprobs=iprobs, .imethod = imethod ))),
+ .iprobs = iprobs, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocation, earg = .elocation)
- }, list( .llocation = llocation,
- .elocation = elocation ))),
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
last = eval(substitute(expression({
misc$expected = TRUE
- misc$link = c("location" = .llocation, "scale" =.lscale)
- misc$earg = list("location" = .elocation, "scale" = .escale)
- misc$imethod = .imethod
- }), list( .escale = escale, .elocation = elocation,
+ misc$link <- c("location" = .llocat , "scale" =.lscale)
+ misc$earg <- list("location" = .elocat , "scale" = .escale )
+ misc$imethod <- .imethod
+ }), list( .escale = escale, .elocat = elocat,
.imethod = imethod,
- .llocation = llocation, .lscale = lscale ))),
+ .llocat = llocat, .lscale = lscale ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[, 1], .llocation, earg = .elocation)
- myscale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ locat = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale = eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dcauchy(x = y, loc=location, sc=myscale, log = TRUE))
+ sum(c(w) * dcauchy(x = y, loc=locat, sc=myscale, log = TRUE))
}
}, list( .escale = escale, .lscale = lscale,
- .elocation = elocation, .llocation = llocation ))),
+ .elocat = elocat, .llocat = llocat ))),
vfamily = c("cauchy"),
deriv = eval(substitute(expression({
- location = eta2theta(eta[, 1], .llocation, earg = .elocation)
- myscale = eta2theta(eta[, 2], .lscale, earg = .escale)
- dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
- dscale.deta = dtheta.deta(myscale, .lscale, earg = .escale)
+ location = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ myscale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ dlocation.deta = dtheta.deta(location, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(myscale, .lscale , earg = .escale )
Z = (y-location) / myscale
dl.dlocation = 2 * Z / ((1 + Z^2) * myscale)
dl.dscale = (Z^2 - 1) / ((1 + Z^2) * myscale)
c(w) * cbind(dl.dlocation * dlocation.deta,
dl.dscale * dscale.deta)
}), list( .escale = escale, .lscale = lscale,
- .elocation = elocation, .llocation = llocation ))),
+ .elocat = elocat, .llocat = llocat ))),
weight = eval(substitute(expression({
run.varcov = 0
ind1 = iam(NA, NA, M = M, both = TRUE, diag = TRUE)
@@ -1300,23 +1482,25 @@ cauchy.control <- function(save.weight = TRUE, ...)
rm(ysim)
temp3 = matrix(c(dl.dlocation, dl.dscale), n, 2)
run.varcov = ((ii-1) * run.varcov +
- temp3[,ind1$row.index]*temp3[,ind1$col.index]) / ii
+ temp3[, ind1$row.index] *
+ temp3[, ind1$col.index]) / ii
}
wz = if (intercept.only)
matrix(colMeans(run.varcov),
n, ncol(run.varcov), byrow = TRUE) else run.varcov
- wz = wz * dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ wz = wz * dthetas.detas[, ind1$row] *
+ dthetas.detas[, ind1$col]
wz = c(w) * matrix(wz, n, dimm(M))
} else {
wz = cbind(matrix(0.5 / myscale^2,n,2), matrix(0,n,1)) *
- dthetas.detas[,ind1$row] * dthetas.detas[,ind1$col]
+ dthetas.detas[, ind1$row] * dthetas.detas[, ind1$col]
wz = c(w) * wz[, 1:M] # diagonal wz
}
wz
}), list( .escale = escale, .lscale = lscale, .nsimEIM = nsimEIM,
- .elocation = elocation, .llocation = llocation ))))
+ .elocat = elocat, .llocat = llocat ))))
}
@@ -1325,280 +1509,367 @@ cauchy.control <- function(save.weight = TRUE, ...)
- cauchy1 = function(scale.arg = 1, llocation = "identity",
- elocation = list(),
- ilocation = NULL, imethod = 1)
+ cauchy1 <- function(scale.arg = 1, llocation = "identity",
+ ilocation = NULL, imethod = 1)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, positive = TRUE))
- stop("bad input for 'scale.arg'")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 3)
- stop("argument 'imethod' must be 1 or 2 or 3")
- if (!is.list(elocation)) elocation = list()
-
- new("vglmff",
- blurb = c("One-parameter Cauchy distribution ",
- "(location unknown, scale known)\n\n",
- "Link: ",
- namesof("location", llocation, earg = elocation), "\n\n",
- "Mean: NA\n",
- "Variance: NA"),
- initialize = eval(substitute(expression({
- predictors.names = namesof("location", .llocation,
- earg = .elocation, tag = FALSE)
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- loc.init = if (length( .ilocation)) .ilocation else {
- if ( .imethod == 2) median(rep(y, w)) else
- if ( .imethod == 3) y else {
- cauchy1.Loglikfun = function(loc, y, x, w, extraargs) {
- scal = extraargs
- sum(w * dcauchy(x = y, loc = loc, scale = scal,
- log = TRUE))
- }
- loc.grid = quantile(y, probs = seq(0.1, 0.9,
- by = 0.05))
- try.this = getMaxMin(loc.grid,
- objfun = cauchy1.Loglikfun,
- y = y, x = x, w = w,
- extraargs = .scale.arg )
- try.this = rep(try.this, length.out = n)
- try.this
- }
- }
- loc.init = rep(loc.init, length.out = n)
- if ( .llocation == "loge") loc.init = abs(loc.init)+0.01
- etastart = theta2eta(loc.init, .llocation, earg = .elocation)
- }
- }), list( .scale.arg=scale.arg, .ilocation = ilocation,
- .elocation = elocation, .llocation = llocation,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .llocation, earg = .elocation)
- }, list( .llocation = llocation,
- .elocation = elocation ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c("location" = .llocation)
- misc$earg = list("location" = .elocation )
- misc$scale.arg = .scale.arg
- }), list( .scale.arg=scale.arg, .elocation = elocation,
- .llocation = llocation ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta, .llocation, earg = .elocation)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dcauchy(x = y, loc=location, scale = .scale.arg, log = TRUE))
- }
- }, list( .scale.arg=scale.arg, .elocation = elocation,
- .llocation = llocation ))),
- vfamily = c("cauchy1"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta, .llocation, earg = .elocation)
- temp = (y-location)/.scale.arg
- dl.dlocation = 2 * temp / ((1 + temp^2) * .scale.arg)
- dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
- c(w) * dl.dlocation * dlocation.deta
- }), list( .scale.arg=scale.arg, .elocation = elocation,
- .llocation = llocation ))),
- weight = eval(substitute(expression({
- wz = c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
- wz
- }), list( .scale.arg=scale.arg, .elocation = elocation,
- .llocation = llocation ))))
-}
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+ if (!is.Numeric(scale.arg, positive = TRUE))
+ stop("bad input for 'scale.arg'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
- logistic1 = function(llocation = "identity",
- elocation = list(),
- scale.arg = 1, imethod = 1)
-{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (!is.Numeric(scale.arg, allowable.length = 1, positive = TRUE))
- stop("'scale.arg' must be a single positive number")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (!is.list(elocation)) elocation = list()
- new("vglmff",
- blurb = c("One-parameter logistic distribution ",
+ new("vglmff",
+ blurb = c("One-parameter Cauchy distribution ",
"(location unknown, scale known)\n\n",
"Link: ",
- namesof("location", llocation, earg = elocation), "\n\n",
- "Mean: location", "\n",
- "Variance: (pi*scale)^2 / 3"),
- initialize = eval(substitute(expression({
- predictors.names = namesof("location", .llocation,
- earg = .elocation, tag = FALSE)
+ namesof("location", llocat, earg = elocat), "\n\n",
+ "Mean: NA\n",
+ "Variance: NA"),
+ initialize = eval(substitute(expression({
+ predictors.names <- namesof("location", .llocat ,
+ earg = .elocat , tag = FALSE)
+
+
+ w.y.check(w = w, y = y)
+
+
+
if (!length(etastart)) {
- location.init = if ( .imethod == 1) y else median(rep(y, w))
- location.init = rep(location.init, length.out = n)
- if ( .llocation == "loge")
- location.init = abs(location.init) + 0.001
- etastart = theta2eta(location.init, .llocation, earg = .elocation)
- }
- }), list( .imethod = imethod, .llocation = llocation,
- .elocation = elocation ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .llocation, earg = .elocation)
- }, list( .llocation = llocation,
- .elocation = elocation ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c(location = .llocation)
- misc$earg = list(location = .elocation )
- misc$scale.arg = .scale.arg
- }), list( .llocation = llocation,
- .elocation = elocation, .scale.arg=scale.arg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta, .llocation, earg = .elocation)
- zedd = (y-location)/.scale.arg
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dlogis(x = y, location = location,
- scale = .scale.arg, log = TRUE))
+ loc.init = if (length( .ilocat)) .ilocat else {
+ if ( .imethod == 2) median(rep(y, w)) else
+ if ( .imethod == 3) y else {
+ cauchy1.Loglikfun <- function(loc, y, x, w, extraargs) {
+ scal = extraargs
+ sum(c(w) * dcauchy(x = y, loc = loc, scale = scal,
+ log = TRUE))
+ }
+ loc.grid = quantile(y, probs = seq(0.1, 0.9,
+ by = 0.05))
+ try.this = getMaxMin(loc.grid,
+ objfun = cauchy1.Loglikfun,
+ y = y, x = x, w = w,
+ extraargs = .scale.arg )
+ try.this = rep(try.this, length.out = n)
+ try.this
+ }
+ }
+ loc.init = rep(loc.init, length.out = n)
+ if ( .llocat == "loge") loc.init = abs(loc.init)+0.01
+ etastart <-
+ theta2eta(loc.init, .llocat , earg = .elocat )
}
- }, list( .llocation = llocation,
- .elocation = elocation, .scale.arg=scale.arg ))),
- vfamily = c("logistic1"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta, .llocation, earg = .elocation)
- ezedd = exp(-(y-location)/.scale.arg)
- dl.dlocation = (1 - ezedd) / ((1 + ezedd) * .scale.arg)
- dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
- c(w) * dl.dlocation * dlocation.deta
- }), list( .llocation = llocation,
- .elocation = elocation, .scale.arg=scale.arg ))),
- weight = eval(substitute(expression({
- wz = c(w) * dlocation.deta^2 / ( .scale.arg^2 * 3)
- wz
- }), list( .scale.arg=scale.arg ))))
+ }), list( .scale.arg = scale.arg, .ilocat = ilocat,
+ .elocat = elocat, .llocat = llocat,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .llocat , earg = .elocat )
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
+ last = eval(substitute(expression({
+ misc$link <- c("location" = .llocat)
+ misc$earg <- list("location" = .elocat )
+
+ misc$expected = TRUE
+ misc$scale.arg = .scale.arg
+ }), list( .scale.arg = scale.arg, .elocat = elocat,
+ .llocat = llocat ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dcauchy(x = y, loc=locat, scale = .scale.arg,
+ log = TRUE))
+ }
+ }, list( .scale.arg = scale.arg, .elocat = elocat,
+ .llocat = llocat ))),
+ vfamily = c("cauchy1"),
+ deriv = eval(substitute(expression({
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ temp = (y-locat)/.scale.arg
+ dl.dlocat = 2 * temp / ((1 + temp^2) * .scale.arg)
+
+ dlocation.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+
+ c(w) * dl.dlocat * dlocation.deta
+ }), list( .scale.arg = scale.arg, .elocat = elocat,
+ .llocat = llocat ))),
+ weight = eval(substitute(expression({
+ wz = c(w) * dlocation.deta^2 / ( .scale.arg^2 * 2)
+ wz
+ }), list( .scale.arg = scale.arg, .elocat = elocat,
+ .llocat = llocat ))))
}
- erlang = function(shape.arg, link = "loge", earg = list(), imethod = 1)
+
+
+ logistic1 <- function(llocation = "identity",
+ scale.arg = 1, imethod = 1)
{
+ if (!is.Numeric(scale.arg, allowable.length = 1, positive = TRUE))
+ stop("'scale.arg' must be a single positive number")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- if (!is.Numeric(shape.arg, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("'shape' must be a positive integer")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
- new("vglmff",
- blurb = c("Erlang distribution\n\n",
- "Link: ", namesof("scale", link, earg = earg), "\n", "\n",
- "Mean: shape * scale", "\n",
- "Variance: shape * scale^2"),
- initialize = eval(substitute(expression({
- if (ncol(y <- as.matrix(y)) > 1)
- stop("erlang cannot handle matrix responses yet")
- if (any(y < 0))
- stop("all y values must be >= 0")
- predictors.names =
- namesof("scale", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- if ( .imethod == 1)
- sc.init = y / .shape.arg
- if ( .imethod==2) {
- sc.init = median(y) / .shape.arg
- sc.init = rep(sc.init, length = n)
- }
- etastart = theta2eta(sc.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg,
- .shape.arg=shape.arg, .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- sc = eta2theta(eta, .link, earg = .earg )
- .shape.arg * sc
- }, list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c(scale = .link)
- misc$earg = list(scale = .earg )
- misc$shape.arg = .shape.arg
- }), list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sc = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * (( .shape.arg - 1) * log(y) - y / sc - .shape.arg * log(sc) -
- lgamma( .shape.arg )))
- }
- }, list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
- vfamily = c("erlang"),
- deriv = eval(substitute(expression({
- sc = eta2theta(eta, .link, earg = .earg )
- dl.dsc = (y / sc - .shape.arg) / sc
- dsc.deta = dtheta.deta(sc, .link, earg = .earg )
- c(w) * dl.dsc * dsc.deta
- }), list( .link = link, .earg = earg, .shape.arg=shape.arg ))),
- weight = eval(substitute(expression({
- ed2l.dsc2 = .shape.arg / sc^2
- wz = c(w) * dsc.deta^2 * ed2l.dsc2
- wz
- }), list( .earg = earg, .shape.arg=shape.arg ))))
+ new("vglmff",
+ blurb = c("One-parameter logistic distribution ",
+ "(location unknown, scale known)\n\n",
+ "Link: ",
+ namesof("location", llocat, earg = elocat), "\n\n",
+ "Mean: location", "\n",
+ "Variance: (pi*scale)^2 / 3"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <- namesof("location", .llocat ,
+ earg = .elocat , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ locat.init = if ( .imethod == 1) y else median(rep(y, w))
+ locat.init = rep(locat.init, length.out = n)
+ if ( .llocat == "loge")
+ locat.init = abs(locat.init) + 0.001
+ etastart =
+ theta2eta(locat.init, .llocat , earg = .elocat )
+ }
+ }), list( .imethod = imethod, .llocat = llocat,
+ .elocat = elocat ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta, .llocat , earg = .elocat )
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
+ last = eval(substitute(expression({
+ misc$expected = TRUE
+ misc$link <- c(location = .llocat)
+ misc$earg <- list(location = .elocat )
+ misc$scale.arg = .scale.arg
+ }), list( .llocat = llocat,
+ .elocat = elocat, .scale.arg = scale.arg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+ zedd = (y-locat)/.scale.arg
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dlogis(x = y, locat = locat,
+ scale = .scale.arg, log = TRUE))
+ }
+ }, list( .llocat = llocat,
+ .elocat = elocat, .scale.arg = scale.arg ))),
+ vfamily = c("logistic1"),
+ deriv = eval(substitute(expression({
+ locat = eta2theta(eta, .llocat , earg = .elocat )
+
+ ezedd = exp(-(y-locat) / .scale.arg )
+ dl.dlocat = (1 - ezedd) / ((1 + ezedd) * .scale.arg)
+ dlocat.deta = dtheta.deta(locat, .llocat ,
+ earg = .elocat )
+
+ c(w) * dl.dlocat * dlocat.deta
+ }), list( .llocat = llocat,
+ .elocat = elocat, .scale.arg = scale.arg ))),
+ weight = eval(substitute(expression({
+ wz = c(w) * dlocat.deta^2 / ( .scale.arg^2 * 3)
+ wz
+ }), list( .scale.arg = scale.arg ))))
}
+ erlang <-
+ function(shape.arg, link = "loge",
+ imethod = 1, zero = NULL)
+{
+
+ if (!is.Numeric(shape.arg, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("'shape' must be a positive integer")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 3)
+ stop("argument 'imethod' must be 1 or 2 or 3")
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
-dbort = function(x, Qsize = 1, a=0.5, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- if (!is.Numeric(x))
- stop("bad input for argument 'x'")
- if (!is.Numeric(Qsize, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'Qsize'")
- if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
- stop("bad input for argument 'a'")
- N = max(length(x), length(Qsize), length(a))
- x = rep(x, length.out = N);
- Qsize = rep(Qsize, length.out = N);
- a = rep(a, length.out = N);
- xok = (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
- ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
- ans[xok] = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
- (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
- (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
- if (!log.arg) {
- ans[xok] = exp(ans[xok])
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Erlang distribution\n\n",
+ "Link: ", namesof("scale", link, earg = earg), "\n", "\n",
+ "Mean: shape * scale", "\n",
+ "Variance: shape * scale^2"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ sc.init = y / .shape.arg
+ }
+ if ( .imethod == 2) {
+ sc.init = (colSums(y * w) / colSums(w))/ .shape.arg
+ }
+ if ( .imethod == 3) {
+ sc.init = median(y) / .shape.arg
+ }
+
+ if ( !is.matrix(sc.init))
+ sc.init = matrix(sc.init, n, M, byrow = TRUE)
+
+
+ etastart <-
+ theta2eta(sc.init, .link , earg = .earg )
}
- ans
+ }), list( .link = link, .earg = earg,
+ .shape.arg = shape.arg, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ sc = eta2theta(eta, .link , earg = .earg )
+ .shape.arg * sc
+ }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$shape.arg <- .shape.arg
+ }), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sc = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * (( .shape.arg - 1) * log(y) - y / sc -
+ .shape.arg * log(sc) - lgamma( .shape.arg )))
+ }
+ }, list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+ vfamily = c("erlang"),
+ deriv = eval(substitute(expression({
+ sc = eta2theta(eta, .link , earg = .earg )
+ dl.dsc = (y / sc - .shape.arg) / sc
+ dsc.deta = dtheta.deta(sc, .link , earg = .earg )
+ c(w) * dl.dsc * dsc.deta
+ }), list( .link = link, .earg = earg, .shape.arg = shape.arg ))),
+ weight = eval(substitute(expression({
+ ed2l.dsc2 = .shape.arg / sc^2
+ wz = c(w) * dsc.deta^2 * ed2l.dsc2
+ wz
+ }), list( .earg = earg, .shape.arg = shape.arg ))))
}
-rbort = function(n, Qsize = 1, a = 0.5) {
+
+
+
+dbort <- function(x, Qsize = 1, a = 0.5, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
+
+ if (!is.Numeric(x))
+ stop("bad input for argument 'x'")
+ if (!is.Numeric(Qsize, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'Qsize'")
+ if (!is.Numeric(a, positive = TRUE) || max(a) >= 1)
+ stop("bad input for argument 'a'")
+ N = max(length(x), length(Qsize), length(a))
+ x = rep(x, length.out = N);
+ Qsize = rep(Qsize, length.out = N);
+ a = rep(a, length.out = N);
+
+ xok = (x >= Qsize) & (x == round(x)) & (a > 0) & (a < 1)
+ ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans[xok] = lgamma(1 + Qsize[xok]) - lgamma(x[xok] + 1 - Qsize[xok]) +
+ (x[xok] - 1 - Qsize[xok]) * log(x[xok]) +
+ (x[xok] - Qsize[xok]) * log(a[xok]) - a[xok] * x[xok]
+ if (!log.arg) {
+ ans[xok] = exp(ans[xok])
+ }
+ ans
+}
+
+
+rbort <- function(n, Qsize = 1, a = 0.5) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -1627,216 +1898,251 @@ rbort = function(n, Qsize = 1, a = 0.5) {
}
- borel.tanner = function(Qsize = 1, link = "logit",
- earg = list(), imethod = 1)
+
+ borel.tanner <- function(Qsize = 1, link = "logit",
+ imethod = 1)
{
- if (!is.Numeric(Qsize, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'Qsize'")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1 or 2, 3 or 4")
+ if (!is.Numeric(Qsize, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'Qsize'")
- new("vglmff",
- blurb = c("Borel-Tanner distribution\n\n",
- "Link: ",
- namesof("a", link, earg = earg), "\n\n",
- "Mean: Qsize/(1-a)",
- "\n",
- "Variance: Qsize*a / (1-a)^3"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y < .Qsize))
- stop("all y values must be >= ", .Qsize)
- if (any(y != round(y)))
- warning("response should be integer-valued")
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- a.init = switch(as.character( .imethod ),
- "1" = 1 - .Qsize / (y+1/8),
- "2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
- "3" = rep(1 - .Qsize / median(y), length.out = n),
- "4" = rep(0.5, length.out = n))
- etastart = theta2eta(a.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .Qsize=Qsize,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- a = eta2theta(eta, .link, earg = .earg )
- .Qsize / (1 - a)
- }, list( .link = link, .earg = earg, .Qsize=Qsize ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c(a = .link)
- misc$earg = list(a = .earg )
- misc$Qsize = .Qsize
- }), list( .link = link, .earg = earg, .Qsize=Qsize ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dbort(x = y, Qsize= .Qsize, a=aa, log = TRUE))
- }
- }, list( .link = link, .earg = earg, .Qsize=Qsize ))),
- vfamily = c("borel.tanner"),
- deriv = eval(substitute(expression({
- a = eta2theta(eta, .link, earg = .earg )
- dl.da = (y- .Qsize)/a - y
- da.deta = dtheta.deta(a, .link, earg = .earg )
- c(w) * dl.da * da.deta
- }), list( .link = link, .earg = earg, .Qsize=Qsize ))),
- weight = eval(substitute(expression({
- ed2l.da2 = .Qsize / (a*(1-a))
- wz = c(w) * da.deta^2 * ed2l.da2
- wz
- }), list( .Qsize=Qsize ))))
-}
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2, 3 or 4")
-dfelix = function(x, a = 0.25, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
- if (!is.Numeric(x))
- stop("bad input for argument 'x'")
- if (!is.Numeric(a, positive = TRUE))
- stop("bad input for argument 'a'")
- N = max(length(x), length(a))
- x = rep(x, length.out = N);
- a = rep(a, length.out = N);
+ new("vglmff",
+ blurb = c("Borel-Tanner distribution\n\n",
+ "Link: ",
+ namesof("a", link, earg = earg), "\n\n",
+ "Mean: Qsize/(1-a)",
+ "\n",
+ "Variance: Qsize*a / (1-a)^3"),
+ initialize = eval(substitute(expression({
+ if (any(y < .Qsize ))
+ stop("all y values must be >= ", .Qsize )
+
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ Is.integer.y = TRUE)
+
- xok = (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
- ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
- ans[xok] = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
- lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
- if (!log.arg) {
- ans[xok] = exp(ans[xok])
+ predictors.names <- namesof("a", .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ a.init = switch(as.character( .imethod ),
+ "1" = 1 - .Qsize / (y+1/8),
+ "2" = rep(1 - .Qsize / weighted.mean(y, w), length.out = n),
+ "3" = rep(1 - .Qsize / median(y), length.out = n),
+ "4" = rep(0.5, length.out = n))
+ etastart =
+ theta2eta(a.init, .link , earg = .earg )
}
- ans
+ }), list( .link = link, .earg = earg, .Qsize = Qsize,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa = eta2theta(eta, .link , earg = .earg )
+ .Qsize / (1 - aa)
+ }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
+ last = eval(substitute(expression({
+ misc$link = c(a = .link)
+
+ misc$earg <- list(a = .earg )
+
+ misc$expected = TRUE
+ misc$Qsize = .Qsize
+ }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dbort(x = y, Qsize = .Qsize, a = aa, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg, .Qsize = Qsize ))),
+ vfamily = c("borel.tanner"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta, .link , earg = .earg )
+ dl.da = (y - .Qsize) / aa - y
+ da.deta = dtheta.deta(aa, .link , earg = .earg )
+ c(w) * dl.da * da.deta
+ }), list( .link = link, .earg = earg, .Qsize = Qsize ))),
+ weight = eval(substitute(expression({
+ ned2l.da2 = .Qsize / (aa * (1 - aa))
+ wz = c(w) * ned2l.da2 * da.deta^2
+ wz
+ }), list( .Qsize = Qsize ))))
}
- felix = function(link = "elogit",
- earg=if (link == "elogit") list(min = 0, max = 0.5) else list(),
- imethod = 1)
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 4)
- stop("argument 'imethod' must be 1 or 2, 3 or 4")
- new("vglmff",
- blurb = c("Felix distribution\n\n",
- "Link: ",
- namesof("a", link, earg = earg), "\n\n",
- "Mean: 1/(1-2*a)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y < 1) || any((y+1)/2 != round((y+1)/2)))
- warning("response should be positive, odd and integer-valued")
- predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
+dfelix <- function(x, a = 0.25, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- if (!length(etastart)) {
- wymean = weighted.mean(y, w)
- a.init = switch(as.character( .imethod ),
- "1" = (y-1+1/8) / (2*(y+1/8)+1/8),
- "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8),
- length.out = n),
- "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8),
- length.out = n),
- "4" = rep(0.25,
- length.out = n))
- etastart = theta2eta(a.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- a = eta2theta(eta, .link, earg = .earg )
- 1 / (1 - 2*a)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$expected = TRUE
- misc$link = c(a = .link)
- misc$earg = list(a = .earg )
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dfelix(x = y, a=aa, log = TRUE))
- }
- }, list( .link = link, .earg = earg ))),
- vfamily = c("felix"),
- deriv = eval(substitute(expression({
- a = eta2theta(eta, .link, earg = .earg )
- dl.da = (y- 1)/(2*a) - y
- da.deta = dtheta.deta(a, .link, earg = .earg )
- c(w) * dl.da * da.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- ed2l.da2 = 1 / (a*(1-2*a))
- wz = c(w) * da.deta^2 * ed2l.da2
- wz
- }), list( .link = link ))))
+ if (!is.Numeric(x))
+ stop("bad input for argument 'x'")
+ if (!is.Numeric(a, positive = TRUE))
+ stop("bad input for argument 'a'")
+ N = max(length(x), length(a))
+ x = rep(x, length.out = N);
+ a = rep(a, length.out = N);
+
+ xok = (x %% 2 == 1) & (x == round(x)) & (x >= 1) & (a > 0) & (a < 0.5)
+ ans = rep(if (log.arg) log(0) else 0, length.out = N) # loglikelihood
+ ans[xok] = ((x[xok]-3)/2) * log(x[xok]) + ((x[xok]-1)/2) * log(a[xok]) -
+ lgamma(x[xok]/2 + 0.5) - a[xok] * x[xok]
+ if (!log.arg) {
+ ans[xok] = exp(ans[xok])
+ }
+ ans
+}
+
+
+
+ felix <- function(link = elogit(min = 0, max = 0.5), imethod = 1) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 4)
+ stop("argument 'imethod' must be 1 or 2, 3 or 4")
+
+
+ new("vglmff",
+ blurb = c("Felix distribution\n\n",
+ "Link: ",
+ namesof("a", link, earg = earg), "\n\n",
+ "Mean: 1/(1-2*a)"),
+ initialize = eval(substitute(expression({
+ if (any(y < 1) ||
+ any((y+1)/2 != round((y+1)/2)))
+ warning("response should be positive, odd and integer-valued")
+
+ w.y.check(w = w, y = y)
+
+
+
+ predictors.names <-
+ namesof("a", .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ wymean <- weighted.mean(y, w)
+ a.init <- switch(as.character( .imethod ),
+ "1" = (y - 1 + 1/8) / (2 * (y + 1/8) + 1/8),
+ "2" = rep((wymean-1+1/8) / (2*(wymean+1/8)+1/8),
+ length.out = n),
+ "3" = rep((median(y)-1+1/8) / (2*(median(y)+1/8)+1/8),
+ length.out = n),
+ "4" = rep(0.25,
+ length.out = n))
+ etastart <-
+ theta2eta(a.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg,
+ .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta, .link , earg = .earg )
+ 1 / (1 - 2 * aa)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$expected <- TRUE
+
+ misc$link <- c(a = .link)
+
+ misc$earg <- list(a = .earg )
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dfelix(x = y, a = aa, log = TRUE))
+ }
+ }, list( .link = link, .earg = earg ))),
+ vfamily = c("felix"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta, .link , earg = .earg )
+ dl.da <- (y - 1) / (2 * aa) - y
+ da.deta <- dtheta.deta(aa, .link , earg = .earg )
+ c(w) * dl.da * da.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ ned2l.da2 <- 1 / (aa * (1 - 2 * aa))
+ wz <- c(w) * da.deta^2 * ned2l.da2
+ wz
+ }), list( .link = link ))))
}
- betaff = function(A=0, B = 1,
- lmu = if (A == 0 & B == 1) "logit" else "elogit", lphi = "loge",
- emu = if (lmu == "elogit") list(min = A, max = B) else list(),
- ephi = list(),
- imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
+ betaff <-
+ function(A = 0, B = 1,
+ lmu = "logit",
+ lphi = "loge",
+ imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
{
- if (!is.Numeric(A, allowable.length = 1) ||
- !is.Numeric(B, allowable.length = 1) || A >= B)
- stop("A must be < B, and both must be of length one")
- stdbeta = (A == 0 && B == 1)
-
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lphi) != "character" && mode(lphi) != "name")
- lphi = as.character(substitute(lphi))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
- any(imu <= A) || any(imu >= B)))
- stop("bad input for argument 'imu'")
- if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
- stop("bad input for argument 'iphi'")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.list(emu)) emu = list()
- if (!is.list(ephi)) ephi = list()
+
+
+ stdbeta <- (A == 0 && B == 1)
+
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+
+
+ lphi <- as.list(substitute(lphi))
+ ephi <- link2list(lphi)
+ lphi <- attr(ephi, "function.name")
+
+
+ if (!is.Numeric(A, allowable.length = 1) ||
+ !is.Numeric(B, allowable.length = 1) || A >= B)
+ stop("A must be < B, and both must be of length one")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (length(imu) && (!is.Numeric(imu, positive = TRUE) ||
+ any(imu <= A) || any(imu >= B)))
+ stop("bad input for argument 'imu'")
+ if (length(iphi) && !is.Numeric(iphi, positive = TRUE))
+ stop("bad input for argument 'iphi'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
new("vglmff",
blurb = c("Beta distribution parameterized by mu and a ",
"precision parameter\n",
if (stdbeta) paste("f(y) = y^(mu*phi-1) * (1-y)^((1-mu)*phi-1)",
- "/ beta(mu*phi,(1-mu)*phi), 0<y<1, 0<mu<1, phi>0\n\n") else
+ "/ beta(mu*phi,(1-mu)*phi),\n",
+ " 0<y<1, 0<mu<1, phi>0\n\n") else
paste("f(y) = (y-",A,")^(mu1*phi-1) * (",B,
"-y)^(((1-mu1)*phi)-1) / \n(beta(mu1*phi,(1-mu1)*phi) * (",
B, "-", A, ")^(phi-1)),\n",
@@ -1846,63 +2152,71 @@ dfelix = function(x, a = 0.25, log = FALSE) {
"Links: ",
namesof("mu", lmu, earg = emu), ", ",
namesof("phi", lphi, earg = ephi)),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (min(y) <= .A || max(y) >= .B)
- stop("data not within (A, B)")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = c(namesof("mu", .lmu, .emu, short = TRUE),
- namesof("phi", .lphi, .ephi, short = TRUE))
- if (!length(etastart)) {
- mu.init = if (is.Numeric( .imu)) .imu else
- {if ( .imethod == 1) weighted.mean(y, w) else
- median(rep(y, w))}
- mu1.init = (mu.init - .A) / ( .B - .A) # In (0,1)
- phi.init = if (is.Numeric( .iphi)) .iphi else
- max(0.01, -1 + ( .B-.A)^2 * mu1.init*(1-mu1.init)/var(y))
- etastart = matrix(0, n, 2)
- etastart[, 1] = theta2eta(mu.init, .lmu, earg = .emu )
- etastart[, 2] = theta2eta(phi.init, .lphi, earg = .ephi )
- }
- }), list( .lmu = lmu, .lphi = lphi, .imu=imu, .iphi=iphi,
- .A = A, .B = B, .emu = emu, .ephi = ephi, .imethod = imethod ))),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <- c(namesof("mu", .lmu , .emu , short = TRUE),
+ namesof("phi", .lphi , .ephi, short = TRUE))
+ if (!length(etastart)) {
+ mu.init = if (is.Numeric( .imu )) .imu else
+ {if ( .imethod == 1) weighted.mean(y, w) else
+ median(rep(y, w))}
+ mu1.init = (mu.init - .A) / ( .B - .A) # In (0,1)
+ phi.init = if (is.Numeric( .iphi)) .iphi else
+ max(0.01, -1 + ( .B - .A)^2 * mu1.init*(1-mu1.init)/var(y))
+ etastart <- matrix(0, n, 2)
+ etastart[, 1] = theta2eta(mu.init, .lmu , earg = .emu )
+ etastart[, 2] = theta2eta(phi.init, .lphi , earg = .ephi )
+ }
+ }), list( .lmu = lmu, .lphi = lphi, .imu = imu, .iphi = iphi,
+ .A = A, .B = B, .emu = emu, .ephi = ephi,
+ .imethod = imethod ))),
+
linkinv = eval(substitute(function(eta, extra = NULL) {
- mu = eta2theta(eta[, 1], .lmu, .emu )
+ mu = eta2theta(eta[, 1], .lmu , .emu )
mu
}, list( .lmu = lmu, .emu = emu, .A = A, .B = B))),
last = eval(substitute(expression({
- misc$link = c(mu = .lmu, phi = .lphi)
- misc$earg = list(mu = .emu, phi = .ephi)
- misc$limits = c( .A, .B)
- misc$stdbeta = .stdbeta
- }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
+ misc$link <- c(mu = .lmu , phi = .lphi)
+ misc$earg <- list(mu = .emu , phi = .ephi)
+ misc$limits <- c( .A, .B)
+ misc$stdbeta <- .stdbeta
+ }), list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- mu = eta2theta(eta[, 1], .lmu, .emu )
+ mu = eta2theta(eta[, 1], .lmu , .emu )
m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- phi = eta2theta(eta[, 2], .lphi, .ephi )
+ phi = eta2theta(eta[, 2], .lphi , .ephi )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
shape1 = phi * m1u
shape2 = (1 - m1u) * phi
zedd = (y - .A) / ( .B - .A)
- sum(w * (dbeta(x=zedd, shape1 = shape1, shape2 = shape2, log = TRUE) -
+ sum(c(w) * (dbeta(x = zedd, shape1 = shape1, shape2 = shape2,
+ log = TRUE) -
log( abs( .B - .A ))))
}
- }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B, .emu = emu, .ephi = ephi,
+ }, list( .lmu = lmu, .lphi = lphi, .A = A, .B = B,
+ .emu = emu, .ephi = ephi,
.stdbeta = stdbeta ))),
vfamily = "betaff",
deriv = eval(substitute(expression({
- mu = eta2theta(eta[, 1], .lmu, .emu )
- phi = eta2theta(eta[, 2], .lphi, .ephi )
+ mu = eta2theta(eta[, 1], .lmu , .emu )
+ phi = eta2theta(eta[, 2], .lphi , .ephi )
m1u = if ( .stdbeta ) mu else (mu - .A) / ( .B - .A)
- dmu.deta = dtheta.deta(mu, .lmu, .emu )
+ dmu.deta = dtheta.deta(mu, .lmu , .emu )
dmu1.dmu = 1 / ( .B - .A)
- dphi.deta = dtheta.deta(phi, .lphi, .ephi )
+ dphi.deta = dtheta.deta(phi, .lphi , .ephi )
temp1 = m1u*phi
temp2 = (1-m1u)*phi
if ( .stdbeta ) {
@@ -1928,9 +2242,9 @@ dfelix = function(x, a = 0.25, log = FALSE) {
trigamma(temp2) * (1-m1u)^2
d2l.dmu1phi = temp1*trigamma(temp1) - temp2*trigamma(temp2)
wz = matrix(as.numeric(NA), n, dimm(M))
- wz[,iam(1,1,M)] = d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
- wz[,iam(2,2,M)] = d2l.dphi2 * dphi.deta^2
- wz[,iam(1,2,M)] = d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
+ wz[, iam(1, 1, M)] = d2l.dmu12 * dmu1.dmu^2 * dmu.deta^2
+ wz[, iam(2, 2, M)] = d2l.dphi2 * dphi.deta^2
+ wz[, iam(1, 2, M)] = d2l.dmu1phi * dmu1.dmu * dmu.deta * dphi.deta
c(w) * wz
}), list( .A = A, .B = B ))))
}
@@ -1939,298 +2253,226 @@ dfelix = function(x, a = 0.25, log = FALSE) {
- beta.ab = function(lshape1 = "loge", lshape2 = "loge",
- eshape1 = list(), eshape2 = list(),
- i1 = NULL, i2 = NULL, trim = 0.05,
- A = 0, B = 1, parallel = FALSE, zero = NULL)
+ beta.ab <- function(lshape1 = "loge", lshape2 = "loge",
+ i1 = NULL, i2 = NULL, trim = 0.05,
+ A = 0, B = 1, parallel = FALSE, zero = NULL)
{
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 = as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 = as.character(substitute(lshape2))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
- stop("bad input for argument 'i1'")
- if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
- stop("bad input for argument 'i2'")
- if (!is.Numeric(A, allowable.length = 1) ||
- !is.Numeric(B, allowable.length = 1) ||
- A >= B)
- stop("A must be < B, and both must be of length one")
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
- stdbeta = (A == 0 && B == 1) # stdbeta == T iff standard beta distn
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
- if (!is.list(eshape1)) eshape1 = list()
- if (!is.list(eshape2)) eshape2 = list()
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- new("vglmff",
- blurb = c("Two-parameter Beta distribution ",
- "(shape parameters parameterization)\n",
- if (stdbeta)
- paste("y^(shape1-1) * (1-y)^(shape2-1) / B(shape1,shape2),",
- "0 <= y <= 1, shape1>0, shape2>0\n\n") else
- paste("(y-",A,")^(shape1-1) * (",B,
- "-y)^(shape2-1) / [B(shape1,shape2) * (",
- B, "-", A, ")^(shape1+shape2-1)], ",
- A," <= y <= ",B," shape1>0, shape2>0\n\n", sep = ""),
- "Links: ",
- namesof("shape1", lshape1, earg = eshape1), ", ",
- namesof("shape2", lshape2, earg = eshape2)),
- constraints = eval(substitute(expression({
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints, int= TRUE)
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .parallel = parallel, .zero = zero ))),
- initialize = eval(substitute(expression({
- if (min(y) <= .A || max(y) >= .B)
- stop("data not within (A, B)")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape1", .lshape1, earg = .eshape1, short = TRUE),
- namesof("shape2", .lshape2, earg = .eshape2, short = TRUE))
+ if (length( i1 ) && !is.Numeric( i1, positive = TRUE))
+ stop("bad input for argument 'i1'")
+ if (length( i2 ) && !is.Numeric( i2, positive = TRUE))
+ stop("bad input for argument 'i2'")
- if (!length(etastart)) {
- mu1d = mean(y, trim = .trim)
- uu = (mu1d - .A) / ( .B - .A)
- DD = ( .B - .A)^2
- pinit = max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
- qinit = max(0.01, pinit * (1 - uu) / uu)
- etastart = matrix(0, n, 2)
- etastart[, 1] = theta2eta( pinit, .lshape1, earg = .eshape1 )
- etastart[, 2] = theta2eta( qinit, .lshape2, earg = .eshape2 )
- }
- if (is.Numeric( .i1 ))
- etastart[, 1] = theta2eta( .i1, .lshape1, earg = .eshape1 )
- if (is.Numeric( .i2 ))
- etastart[, 2] = theta2eta( .i2, .lshape2, earg = .eshape2 )
- }), list( .lshape1 = lshape1, .lshape2 = lshape2,
- .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
- .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .lshape1, shape2 = .lshape2)
- misc$earg = list(shape1 = .eshape1, shape2 = .eshape2)
- misc$limits = c( .A, .B)
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- zedd = (y - .A) / ( .B - .A)
- sum(w * (dbeta(x=zedd, shape1 = shapes[, 1], shape2 = shapes[, 2],
- log = TRUE) - log( abs( .B - .A ))))
- }
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- vfamily = "beta.ab",
- deriv = eval(substitute(expression({
- shapes = cbind(eta2theta(eta[, 1], .lshape1, earg = .eshape1 ),
- eta2theta(eta[, 2], .lshape2, earg = .eshape2 ))
- dshapes.deta = cbind(dtheta.deta(shapes[, 1], .lshape1, earg = .eshape1),
- dtheta.deta(shapes[, 2], .lshape2, earg = .eshape2))
- dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
- c(w) * dl.dshapes * dshapes.deta
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
- .eshape1 = eshape1, .eshape2 = eshape2 ))),
- weight = expression({
- temp2 = trigamma(shapes[, 1]+shapes[, 2])
- d2l.dshape12 = temp2 - trigamma(shapes[, 1])
- d2l.dshape22 = temp2 - trigamma(shapes[, 2])
- d2l.dshape1shape2 = temp2
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[, 1] * dshapes.deta[, 2]
-
- -c(w) * wz
- }))
-}
-
-
-
- beta4 = function(link = "loge", earg = list(),
- i1=2.3, i2=2.4, iA = NULL, iB = NULL)
-{
+ if (!is.Numeric(A, allowable.length = 1) ||
+ !is.Numeric(B, allowable.length = 1) ||
+ A >= B)
+ stop("A must be < B, and both must be of length one")
+
+ stdbeta <- (A == 0 && B == 1) # stdbeta == T iff standard beta distn
+
+
+
+ new("vglmff",
+ blurb = c("Two-parameter Beta distribution ",
+ "(shape parameters parameterization)\n",
+ if (stdbeta)
+ paste("y^(shape1-1) * (1-y)^(shape2-1) / B(shape1,shape2),",
+ "0 <= y <= 1, shape1>0, shape2>0\n\n") else
+ paste("(y-",A,")^(shape1-1) * (",B,
+ "-y)^(shape2-1) / [B(shape1,shape2) * (",
+ B, "-", A, ")^(shape1+shape2-1)], ",
+ A," <= y <= ",B," shape1>0, shape2>0\n\n", sep = ""),
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2)),
+ constraints = eval(substitute(expression({
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel ,
+ constraints, int = TRUE)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .parallel = parallel, .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) <= .A || max(y) >= .B)
+ stop("data not within (A, B)")
+
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+
+
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <-
+ c(namesof("shape1", .lshape1 , earg = .eshape1 , short = TRUE),
+ namesof("shape2", .lshape2 , earg = .eshape2 , short = TRUE))
+
+ if (!length(etastart)) {
+ mu1d = mean(y, trim = .trim)
+ uu = (mu1d - .A) / ( .B - .A)
+ DD = ( .B - .A)^2
+ pinit = max(0.01, uu^2 * (1 - uu) * DD / var(y) - uu)
+ qinit = max(0.01, pinit * (1 - uu) / uu)
+ etastart <- matrix(0, n, 2)
+ etastart[, 1] = theta2eta( pinit, .lshape1 , earg = .eshape1 )
+ etastart[, 2] = theta2eta( qinit, .lshape2 , earg = .eshape2 )
+ }
+ if (is.Numeric( .i1 ))
+ etastart[, 1] = theta2eta( .i1, .lshape1 , earg = .eshape1 )
+ if (is.Numeric( .i2 ))
+ etastart[, 2] = theta2eta( .i2, .lshape2 , earg = .eshape2 )
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2,
+ .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1 = .lshape1 , shape2 = .lshape2)
+ misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2)
+ misc$limits <- c( .A, .B)
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL){
+ shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ zedd = (y - .A) / ( .B - .A)
+ sum(c(w) * (dbeta(x = zedd, shape1 = shapes[, 1],
+ shape2 = shapes[, 2],
+ log = TRUE) - log( abs( .B - .A ))))
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ vfamily = "beta.ab",
+ deriv = eval(substitute(expression({
+ shapes = cbind(eta2theta(eta[, 1], .lshape1 , earg = .eshape1 ),
+ eta2theta(eta[, 2], .lshape2 , earg = .eshape2 ))
+
+ dshapes.deta <-
+ cbind(dtheta.deta(shapes[, 1], .lshape1 , earg = .eshape1),
+ dtheta.deta(shapes[, 2], .lshape2 , earg = .eshape2))
+
+ dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
+ digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
+
+ c(w) * dl.dshapes * dshapes.deta
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .A = A, .B = B,
+ .eshape1 = eshape1, .eshape2 = eshape2 ))),
+ weight = expression({
+ temp2 = trigamma(shapes[, 1]+shapes[, 2])
+ d2l.dshape12 = temp2 - trigamma(shapes[, 1])
+ d2l.dshape22 = temp2 - trigamma(shapes[, 2])
+ d2l.dshape1shape2 = temp2
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] = d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] = d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] = d2l.dshape1shape2 * dshapes.deta[, 1] *
+ dshapes.deta[, 2]
+ -c(w) * wz
+ }))
+}
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Four-parameter Beta distribution\n",
- "(y-A)^(shape1-1) * (B-y)^(shape2-1), A < y < B \n\n",
- "Links: ",
- namesof("shape1", link, earg = earg), ", ",
- namesof("shape2", link, earg = earg), ", ",
- " A, B"),
- initialize = eval(substitute(expression({
- if (!is.vector(y) || (is.matrix(y) && ncol(y) != 1))
- stop("y must be a vector or a one-column matrix")
-
- if (length( .iA) && any(y < .iA))
- stop("initial 'A' value out of range")
- if (length( .iB) && any(y > .iB))
- stop("initial 'B' value out of range")
-
- predictors.names = c(
- namesof("shape1", .link, earg = .earg, short = TRUE),
- namesof("shape2", .link, earg = .earg, short = TRUE), "A", "B")
- my.range = diff(range(y))
- if (!length(etastart)) {
- etastart = cbind(shape1= rep( .i1, length.out = length(y)),
- shape2= .i2,
- A = if (length( .iA)) .iA else
- min(y)-my.range/70,
- B = if (length( .iB)) .iB else
- max(y)+my.range/70)
- }
- }), list( .i1=i1, .i2=i2, .iA=iA, .iB=iB,
- .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta[, 1:2], .link, earg = .earg )
- .A = eta[, 3]
- .B = eta[, 4]
- .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .link, shape2 = .link,
- A = "identity", B = "identity")
- misc$earg = list(shape1 = .earg, shape2 = .earg,
- A = list(), B = list())
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shapes = eta2theta(eta[, 1:2], .link, earg = .earg )
- .A = eta[, 3]
- .B = eta[, 4]
- temp = lbeta(shapes[, 1], shapes[, 2])
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * ((shapes[, 1]-1)*log(y-.A) + (shapes[, 2]-1)*log( .B-y) - temp -
- (shapes[, 1]+shapes[, 2]-1)*log( .B-.A )))
- }, list( .link = link, .earg = earg ))),
- vfamily = "beta4",
- deriv = eval(substitute(expression({
- shapes = eta2theta(eta[, 1:2], .link, earg = .earg )
- .A = eta[, 3]
- .B = eta[, 4]
- dshapes.deta = dtheta.deta(shapes, .link, earg = .earg )
- rr1 = ( .B - .A)
- temp3 = (shapes[, 1] + shapes[, 2] - 1)
- temp1 = temp3 / rr1
- dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
- dl.dA = -(shapes[, 1]-1) / (y- .A) + temp1
- dl.dB = (shapes[, 2]-1) / ( .B - y) - temp1
- c(w) * cbind(dl.dshapes * dshapes.deta, dl.dA, dl.dB)
- }), list( .link = link, .earg = earg ))),
- weight = expression({
-
- temp2 = trigamma(shapes[, 1]+shapes[, 2])
- d2l.dshape12 = temp2 - trigamma(shapes[, 1])
- d2l.dshape22 = temp2 - trigamma(shapes[, 2])
- d2l.dshape1shape2 = temp2
-
- ed2l.dAA = -temp3 * shapes[, 2] / ((shapes[, 1]-2) * rr1^2)
- ed2l.dBB = -temp3 * shapes[, 1] / ((shapes[, 2]-2) * rr1^2)
- ed2l.dAB = -temp3 / (rr1^2)
- ed2l.dAshape1 = -shapes[, 2] / ((shapes[, 1]-1) * rr1)
- ed2l.dAshape2 = 1/rr1
- ed2l.dBshape1 = -1/rr1
- ed2l.dBshape2 = shapes[, 1] / ((shapes[, 2]-1) * rr1)
-
- wz = matrix(as.numeric(NA), n, dimm(M)) #10=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 * dshapes.deta[, 1] * dshapes.deta[, 2]
-
- wz[,iam(3,3,M)] = ed2l.dAA
- wz[,iam(4,4,M)] = ed2l.dBB
- wz[,iam(4,3,M)] = ed2l.dAB
-
- wz[,iam(3,1,M)] = ed2l.dAshape1 * dshapes.deta[, 1]
- wz[,iam(3,2,M)] = ed2l.dAshape2 * dshapes.deta[, 2]
- wz[,iam(4,1,M)] = ed2l.dBshape1 * dshapes.deta[, 1]
- wz[,iam(4,2,M)] = ed2l.dBshape2 * dshapes.deta[, 2]
-
-
- -c(w) * wz
- }))
-}
-
-
-
- simple.exponential = function()
-{
+
+
+ simple.exponential <- function() {
new("vglmff",
- blurb = c("Simple Exponential distribution\n",
- "Link: log(rate)\n"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- devy = -log(y) - 1
- devmu = -log(mu) - y/mu
- devi = 2 * (devy - devmu)
- if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
+ blurb = c("Simple exponential distribution\n",
+ "Link: log(rate)\n"),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ devy <- -log(y) - 1
+ devmu <- -log(mu) - y/mu
+ devi <- 2 * (devy - devmu)
+ if (residuals) sign(y - mu) * sqrt(abs(devi) * w) else sum(w * devi)
},
- initialize=expression({
- predictors.names = "log(rate)"
- mustart = y + (y == 0) / 8
+ initialize = expression({
+ predictors.names <- "log(rate)"
+ mustart <- y + (y == 0) / 8
}),
linkinv = function(eta, extra = NULL)
- exp(-eta),
- link = function(mu, extra = NULL)
- -log(mu),
+ exp(-eta),
+ linkfun = function(mu, extra = NULL)
+ -log(mu),
vfamily = "simple.exponential",
- deriv=expression({
- rate = 1 / mu
- dl.drate = mu - y
- drate.deta = dtheta.deta(rate, "loge")
- c(w) * dl.drate * drate.deta
+ deriv = expression({
+ rate <- 1 / mu
+ dl.drate <- mu - y
+ drate.deta <- dtheta.deta(rate, "loge")
+ c(w) * dl.drate * drate.deta
}),
weight = expression({
- ed2l.drate2 = -1 / rate^2
- wz = -c(w) * drate.deta^2 * ed2l.drate2
- wz
+ ned2l.drate2 <- 1 / rate^2
+ wz <- c(w) * drate.deta^2 * ned2l.drate2
+ wz
}))
}
- exponential <- function(link = "loge", earg = list(),
- location = 0, expected = TRUE) {
+ exponential <- function(link = "loge",
+ location = 0, expected = TRUE,
+ shrinkage.init = 0.95,
+ zero = NULL) {
if (!is.Numeric(location, allowable.length = 1))
- stop("bad input for argument 'location'")
+ stop("bad input for argument 'location'")
- if (mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg = list()
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(shrinkage.init, allowable.length = 1) ||
+ shrinkage.init < 0 ||
+ shrinkage.init > 1)
+ stop("bad input for argument 'shrinkage.init'")
+
+
new("vglmff",
blurb = c("Exponential distribution\n\n",
- "Link: ", namesof("rate", link, tag = TRUE), "\n",
+ "Link: ",
+ namesof("rate", link, earg, tag = TRUE), "\n",
"Mean: ", "mu = ",
- if (location == 0) "1/rate" else
- paste(location, "+ 1/rate"), "\n",
+ if (location == 0) "1 / rate" else
+ paste(location, "+ 1 / rate"), "\n",
"Variance: ",
if (location == 0) "Exponential: mu^2" else
paste("(mu - ", location, ")^2", sep = "")),
+
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
devy <- -log(y - .location) - 1
@@ -2243,406 +2485,570 @@ dfelix = function(x, a = 0.25, log = FALSE) {
}
}, list( .location = location, .earg = earg ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- extra$loc <- .location # Passed into, e.g., @linkfun, @deriv etc.
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ extra$Loc <- matrix( .location , n, ncoly, byrow = TRUE)
- if (any(y <= extra$loc))
- stop("all responses must be greater than ", extra$loc)
- predictors.names <- namesof("rate", .link, tag = FALSE)
+ if (any(y <= extra$Loc))
+ stop("all responses must be greater than ", extra$Loc)
+
+ mynames1 <- if (M == 1) "rate" else paste("rate", 1:M, sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , short = TRUE)
if (length(mustart) + length(etastart) == 0)
- mustart <- y + (y == extra$loc) / 8
+ mustart <- matrix(colSums(y * w) / colSums(w),
+ n, M, byrow = TRUE) * .sinit +
+ (1 - .sinit) * y +
+ 1 / 8
+
+
if (!length(etastart))
- etastart <- theta2eta(1 / (mustart - extra$loc),
- .link, earg = .earg )
- }), list( .location = location, .link = link, .earg = earg ))),
+ etastart <- theta2eta(1 / (mustart - extra$Loc),
+ .link , earg = .earg )
+ }), list( .location = location,
+ .link = link, .earg = earg,
+ .sinit = shrinkage.init ))),
linkinv = eval(substitute(function(eta, extra = NULL)
- extra$loc + 1 / eta2theta(eta, .link, earg = .earg ),
+ extra$Loc + 1 / eta2theta(eta, .link , earg = .earg ),
list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$location <- extra$loc
- misc$link <- c(rate = .link)
- misc$earg <- list(rate = .earg )
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:M) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$location <- .location
misc$expected <- .expected
- }), list( .link = link, .earg = earg, .expected = expected ))),
+ misc$multipleResponses <- TRUE
+ misc$Musual <- Musual
+ }), list( .link = link, .earg = earg,
+ .expected = expected, .location = location ))),
linkfun = eval(substitute(function(mu, extra = NULL)
- theta2eta(1 / (mu - extra$loc), .link, earg = .earg ),
+ theta2eta(1 / (mu - extra$Loc), .link , earg = .earg ),
list( .link = link, .earg = earg ))),
vfamily = c("exponential"),
deriv = eval(substitute(expression({
- rate <- 1 / (mu - extra$loc)
+ rate <- 1 / (mu - extra$Loc)
dl.drate <- mu - y
- drate.deta <- dtheta.deta(rate, .link, earg = .earg )
+ drate.deta <- dtheta.deta(rate, .link , earg = .earg )
c(w) * dl.drate * drate.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2l.drate2 <- -((mu-extra$loc)^2)
- wz <- -(drate.deta^2) * d2l.drate2
- if (! .expected) {
- d2rate.deta2 <- d2theta.deta2(rate, .link, earg = .earg )
+ ned2l.drate2 <- (mu - extra$Loc)^2
+ wz <- ned2l.drate2 * drate.deta^2
+ if (! .expected ) {
+ d2rate.deta2 <- d2theta.deta2(rate, .link , earg = .earg )
wz <- wz - dl.drate * d2rate.deta2
}
- c(w) * wz
+ c(w) * wz
}), list( .link = link, .expected = expected, .earg = earg ))))
}
- gamma1 = function(link = "loge", earg = list())
+ gamma1 <- function(link = "loge", zero = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("1-parameter Gamma distribution\n",
- "Link: ",
- namesof("shape", link, earg = earg, tag = TRUE), "\n",
- "Mean: mu (=shape)\n",
- "Variance: mu (=shape)"),
- initialize = eval(substitute(expression({
- if (any(y <= 0))
- stop("all responses must be positive")
- M = if (is.matrix(y)) ncol(y) else 1
- temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep = "")
- predictors.names =
- namesof(temp.names, .link, earg = .earg, short = TRUE)
- if (!length(etastart))
- etastart = cbind(theta2eta(y + 1/8, .link, earg = .earg ))
- }), list( .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL)
- eta2theta(eta, .link, earg = .earg )),
- list( .link = link, .earg = earg )),
- last = eval(substitute(expression({
- temp.names = if (M == 1) "shape" else paste("shape", 1:M, sep = "")
- misc$link = rep( .link, length = M)
- names(misc$link) = temp.names
- misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
- for(ii in 1:M) misc$earg[[ii]] = .earg
- misc$expected = TRUE
- }), list( .link = link, .earg = earg ))),
- linkfun = eval(substitute(function(mu, extra = NULL)
- theta2eta(mu, .link, earg = .earg )),
- list( .link = link, .earg = earg )),
- loglikelihood= function(mu, y, w, residuals = FALSE, eta, extra = NULL)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgamma(x = y, shape=mu, scale = 1, log = TRUE))
- },
- vfamily = c("gamma1"),
- deriv = eval(substitute(expression({
- shape = mu
- dl.dshape = log(y) - digamma(shape)
- dshape.deta = dtheta.deta(shape, .link, earg = .earg )
- c(w) * dl.dshape * dshape.deta
- }), list( .link = link, .earg = earg ))),
- weight = expression({
- d2l.dshape = -trigamma(shape)
- wz = -(dshape.deta^2) * d2l.dshape
- c(w) * wz
- }))
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ new("vglmff",
+ blurb = c("1-parameter Gamma distribution\n",
+ "Link: ",
+ namesof("shape", link, earg = earg, tag = TRUE), "\n",
+ "Mean: mu (=shape)\n",
+ "Variance: mu (=shape)"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ M <- if (is.matrix(y)) ncol(y) else 1
+ Musual <- 1
+
+ mynames1 <- if (M == 1) "shape" else paste("shape", 1:M, sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , short = TRUE)
+
+ if (!length(etastart))
+ etastart <- cbind(theta2eta(y + 1/8, .link , earg = .earg ))
+ }), list( .link = link, .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL)
+ eta2theta(eta, .link , earg = .earg )),
+ list( .link = link, .earg = earg )),
+ last = eval(substitute(expression({
+ misc$link <- rep( .link , length = M)
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- names(misc$link)
+ for(ii in 1:M)
+ misc$earg[[ii]] <- .earg
+
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$Musual <- Musual
+ }), list( .link = link, .earg = earg ))),
+ linkfun = eval(substitute(function(mu, extra = NULL)
+ theta2eta(mu, .link , earg = .earg )),
+ list( .link = link, .earg = earg )),
+ loglikelihood = function(mu, y, w, residuals = FALSE, eta, extra = NULL)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dgamma(x = y, shape = mu, scale = 1, log = TRUE))
+ },
+ vfamily = c("gamma1"),
+ deriv = eval(substitute(expression({
+ shape <- mu
+ dl.dshape <- log(y) - digamma(shape)
+ dshape.deta <- dtheta.deta(shape, .link , earg = .earg )
+ ans <- c(w) * dl.dshape * dshape.deta
+ ans
+ c(w) * dl.dshape * dshape.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = expression({
+ ned2l.dshape <- trigamma(shape)
+ wz <- ned2l.dshape * dshape.deta^2
+ c(w) * wz
+ }))
}
- gamma2.ab = function(lrate = "loge", lshape = "loge",
- erate = list(), eshape = list(),
- irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
+
+
+ gamma2.ab <-
+ function(lrate = "loge", lshape = "loge",
+ irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
{
- if (mode(lrate) != "character" && mode(lrate) != "name")
- lrate = as.character(substitute(lrate))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length( irate) && !is.Numeric(irate, positive = TRUE))
- stop("bad input for argument 'irate'")
- if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
- stop("bad input for argument 'ishape'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.logical(expected) || length(expected) != 1)
- stop("bad input for argument 'expected'")
- if (!is.list(erate)) erate = list()
- if (!is.list(eshape)) eshape = list()
+ lrate <- as.list(substitute(lrate))
+ erate <- link2list(lrate)
+ lrate <- attr(erate, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+ if (length( irate) && !is.Numeric(irate, positive = TRUE))
+ stop("bad input for argument 'irate'")
+ if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
+ stop("bad input for argument 'ishape'")
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.logical(expected) || length(expected) != 1)
+ stop("bad input for argument 'expected'")
+
+
+
+
+ new("vglmff",
+ blurb = c("2-parameter Gamma distribution\n",
+ "Links: ",
+ namesof("rate", lrate, earg = erate), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: mu = shape/rate\n",
+ "Variance: (mu^2)/shape = shape/rate^2"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE)
+
+
+ predictors.names <-
+ c(namesof("rate", .lrate , earg = .erate , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape , tag = FALSE))
+
+ if (!length(etastart)) {
+ mymu = y + 0.167 * (y == 0)
+ junk = lsfit(x, y, wt = w, intercept = FALSE)
+ var.y.est = sum(c(w) * junk$resid^2) / (nrow(x) - length(junk$coef))
+ init.shape = if (length( .ishape )) .ishape else mymu^2 / var.y.est
+ init.rate = if (length( .irate)) .irate else init.shape / mymu
+ init.rate = rep(init.rate, length.out = n)
+ init.shape = rep(init.shape, length.out = n)
+ if ( .lshape == "loglog")
+ init.shape[init.shape <= 1] = 3.1 # Hopefully value is big enough
+ etastart <-
+ cbind(theta2eta(init.rate, .lrate , earg = .erate ),
+ theta2eta(init.shape, .lshape , earg = .eshape ))
+ }
+ }), list( .lrate = lrate, .lshape = lshape,
+ .irate = irate, .ishape = ishape,
+ .erate = erate, .eshape = eshape))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 2], .lshape , earg = .eshape ) / (
+ eta2theta(eta[, 1], .lrate , earg = .erate ))
+ }, list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape))),
+ last = eval(substitute(expression({
+ misc$link <- c(rate = .lrate , shape = .lshape)
+ misc$earg <- list(rate = .erate, shape = .eshape )
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rate = eta2theta(eta[, 1], .lrate , earg = .erate )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dgamma(x = y, shape = shape, rate=rate, log = TRUE))
+ }
+ }, list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape))),
+ vfamily = c("gamma2.ab"),
+ deriv = eval(substitute(expression({
+ rate = eta2theta(eta[, 1], .lrate , earg = .erate )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
+ dl.drate = mu - y
+ dl.dshape = log(y*rate) - digamma(shape)
+ dratedeta = dtheta.deta(rate, .lrate , earg = .erate )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ c(w) * cbind(dl.drate * dratedeta,
+ dl.dshape * dshape.deta)
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ d2l.dshape2 = -trigamma(shape)
+ d2l.drate2 = -shape/(rate^2)
+ d2l.drateshape = 1/rate
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] = -d2l.drate2 * dratedeta^2
+ wz[, iam(2, 2, M)] = -d2l.dshape2 * dshape.deta^2
+ wz[, iam(1, 2, M)] = -d2l.drateshape * dratedeta * dshape.deta
+ if (! .expected) {
+ d2ratedeta2 = d2theta.deta2(rate, .lrate , earg = .erate )
+ d2shapedeta2 = d2theta.deta2(shape, .lshape , earg = .eshape )
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.drate * d2ratedeta2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dshape * d2shapedeta2
+ }
+ c(w) * wz
+ }), list( .lrate = lrate, .lshape = lshape,
+ .erate = erate, .eshape = eshape, .expected = expected ))))
+}
+
+
+
+ gamma2 <-
+ function(lmu = "loge", lshape = "loge",
+ imethod = 1, ishape = NULL,
+ parallel = FALSE, intercept.apply = FALSE,
+ deviance.arg = FALSE, zero = -2)
+{
+
+
+
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+ if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
+ stop("bad input for argument 'ishape'")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ if (!is.logical(intercept.apply) ||
+ length(intercept.apply) != 1)
+ stop("argument 'intercept.apply' must be a single logical")
+
+ if (is.logical(parallel) && parallel && length(zero))
+ stop("set 'zero = NULL' if 'parallel = TRUE'")
+
+
+ ans =
new("vglmff",
- blurb = c("2-parameter Gamma distribution\n",
+ blurb = c("2-parameter Gamma distribution",
+ " (McCullagh and Nelder 1989 parameterization)\n",
"Links: ",
- namesof("rate", lrate, earg = erate), ", ",
+ namesof("mu", lmu, earg = emu), ", ",
namesof("shape", lshape, earg = eshape), "\n",
- "Mean: mu = shape/rate\n",
- "Variance: (mu^2)/shape = shape/rate^2"),
+ "Mean: mu\n",
+ "Variance: (mu^2)/shape"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- # Error check
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0))
- stop("all responses must be positive")
- predictors.names =
- c(namesof("rate", .lrate, earg = .erate, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
- if (!length(etastart)) {
- mymu = y + 0.167 * (y == 0)
- junk = lsfit(x, y, wt = w, intercept = FALSE)
- var.y.est = sum(w * junk$resid^2) / (nrow(x) - length(junk$coef))
- init.shape = if (length( .ishape)) .ishape else mymu^2 / var.y.est
- init.rate = if (length( .irate)) .irate else init.shape / mymu
- init.rate = rep(init.rate, length.out = n)
- init.shape = rep(init.shape, length.out = n)
- if ( .lshape == "loglog")
- init.shape[init.shape <= 1] = 3.1 #Hopefully value is big enough
- etastart = cbind(theta2eta(init.rate, .lrate, earg = .erate),
- theta2eta(init.shape, .lshape, earg = .eshape))
- }
- }), list( .lrate = lrate, .lshape = lshape, .irate=irate, .ishape = ishape,
- .erate = erate, .eshape = eshape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 2], .lshape, earg = .eshape) / eta2theta(eta[, 1], .lrate,
- earg = .erate)
- }, list( .lrate = lrate, .lshape = lshape,
- .erate = erate, .eshape = eshape ))),
- last = eval(substitute(expression({
- misc$link = c(rate = .lrate, shape = .lshape)
- misc$earg = list(rate = .erate, shape = .eshape)
- }), list( .lrate = lrate, .lshape = lshape,
- .erate = erate, .eshape = eshape ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rate = eta2theta(eta[, 1], .lrate, earg = .erate)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgamma(x = y, shape = shape, rate=rate, log = TRUE))
+
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints,
+ intercept.apply = .intercept.apply )
+
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ constraints <- cm.zero.vgam(constraints, x, z_Index, M)
+ }), list( .zero = zero,
+ .parallel = parallel, .intercept.apply = intercept.apply ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+ Musual <- 2
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
+ envir = VGAM:::VGAMenv)
+ if (any(function.name == c("cqo","cao")) &&
+ is.Numeric( .zero , allowable.length = 1) && .zero != -2)
+ stop("argument zero = -2 is required")
+
+ M = Musual * ncol(y)
+ NOS = ncoly = ncol(y) # Number of species
+
+
+ temp1.names =
+ if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = "")
+ temp2.names =
+ if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
+ predictors.names <-
+ c(namesof(temp1.names, .lmu , earg = .emu , tag = FALSE),
+ namesof(temp2.names, .lshape , earg = .eshape, tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
+
+
+
+
+ if (is.logical( .parallel ) & .parallel & ncoly > 1)
+ warning("the constraint matrices may not be correct with ",
+ "multiple responses")
+
+
+
+ if (!length(etastart)) {
+ init.shape = matrix(1.0, n, NOS)
+ mymu = y # + 0.167 * (y == 0) # imethod == 1 (the default)
+ if ( .imethod == 2) {
+ for(ii in 1:ncol(y)) {
+ mymu[, ii] = weighted.mean(y[, ii], w = w[, ii])
+ }
}
- }, list( .lrate = lrate, .lshape = lshape,
- .erate = erate, .eshape = eshape ))),
- vfamily = c("gamma2.ab"),
- deriv = eval(substitute(expression({
- rate = eta2theta(eta[, 1], .lrate, earg = .erate)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
- dl.drate = mu - y
- dl.dshape = log(y*rate) - digamma(shape)
- dratedeta = dtheta.deta(rate, .lrate, earg = .erate)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
- c(w) * cbind(dl.drate * dratedeta,
- dl.dshape * dshape.deta)
- }), list( .lrate = lrate, .lshape = lshape,
- .erate = erate, .eshape = eshape ))),
- weight = eval(substitute(expression({
- d2l.dshape2 = -trigamma(shape)
- d2l.drate2 = -shape/(rate^2)
- d2l.drateshape = 1/rate
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = -d2l.drate2 * dratedeta^2
- wz[,iam(2,2,M)] = -d2l.dshape2 * dshape.deta^2
- wz[,iam(1,2,M)] = -d2l.drateshape * dratedeta * dshape.deta
- if (! .expected) {
- d2ratedeta2 = d2theta.deta2(rate, .lrate, earg = .erate)
- d2shapedeta2 = d2theta.deta2(shape, .lshape, earg = .eshape)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.drate * d2ratedeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dshape * d2shapedeta2
+ for(spp in 1:NOS) {
+ junk = lsfit(x, y[, spp], wt = w[, spp], intercept = FALSE)
+ var.y.est = sum(w[, spp] * junk$resid^2) / (n - length(junk$coef))
+ init.shape[, spp] = if (length( .ishape )) .ishape else
+ mymu[, spp]^2 / var.y.est
+ if ( .lshape == "loglog")
+ init.shape[init.shape[, spp] <= 1,spp] = 3.1
}
- c(w) * wz
- }), list( .lrate = lrate, .lshape = lshape,
- .erate = erate, .eshape = eshape, .expected = expected ))))
-}
+ etastart <-
+ cbind(theta2eta(mymu, .lmu , earg = .emu ),
+ theta2eta(init.shape, .lshape , earg = .eshape ))
+ etastart <-
+ etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ }
+ }), list( .lmu = lmu, .lshape = lshape, .ishape = ishape,
+ .emu = emu, .eshape = eshape,
+ .parallel = parallel, .intercept.apply = intercept.apply,
+ .zero = zero, .imethod = imethod ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Musual <- 2
+ NOS = ncol(eta) / Musual
+ eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmu , earg = .emu )
+ }, list( .lmu = lmu, .emu = emu ))),
+ last = eval(substitute(expression({
+ if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
+ rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
+
+ tmp34 = c(rep( .lmu , length = NOS),
+ rep( .lshape , length = NOS))
+ names(tmp34) =
+ c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
+ if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
+ tmp34 = tmp34[interleave.VGAM(M, M = 2)]
+ misc$link = tmp34 # Already named
+
+ misc$earg = vector("list", M)
+ names(misc$earg) = names(misc$link)
+ for(ii in 1:NOS) {
+ misc$earg[[Musual*ii-1]] = .emu
+ misc$earg[[Musual*ii ]] = .eshape
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$parallel <- .parallel
+ misc$intercept.apply <- .intercept.apply
+ }), list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape,
+ .parallel = parallel, .intercept.apply = intercept.apply ))),
+ linkfun = eval(substitute(function(mu, extra = NULL) {
+ temp = theta2eta(mu, .lmu , earg = .emu )
+ temp = cbind(temp, NA * temp)
+ temp[, interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
+ }, list( .lmu = lmu, .emu = emu ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Musual <- 2
+ NOS = ncol(eta) / Musual
+ mymu = mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu , earg = .emu )
+ shapemat = eta2theta(eta[, Musual * (1:NOS), drop = FALSE],
+ .lshape , earg = .eshape )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dgamma(x = y,
+ shape = c(shapemat),
+ scale = c(mymu/shapemat),
+ log = TRUE))
+ }
+ }, list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape))),
+ vfamily = c("gamma2"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ NOS = ncol(eta) / Musual
+ mymu = eta2theta(eta[, Musual * (1:NOS) - 1],
+ .lmu , earg = .emu )
+ shape = eta2theta(eta[, Musual * (1:NOS)],
+ .lshape , earg = .eshape )
+ dl.dmu = shape * (y / mymu - 1) / mymu
+ dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
+ y / mymu
- gamma2 = function(lmu = "loge", lshape = "loge",
- emu = list(), eshape = list(),
- imethod = 1,
- deviance.arg = FALSE, ishape = NULL, zero = -2)
-{
+ dmu.deta = dtheta.deta(mymu, .lmu , earg = .emu )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length(zero) && !is.Numeric(zero, integer.valued = TRUE))
- stop("bad input for argument 'zero'")
- if (length( ishape) && !is.Numeric(ishape, positive = TRUE))
- stop("bad input for argument 'ishape'")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
-
- if (!is.list(emu)) emu = list()
- if (!is.list(eshape)) eshape = list()
+ myderiv = c(w) * cbind(dl.dmu * dmu.deta,
+ dl.dshape * dshape.deta)
+ myderiv[, interleave.VGAM(M, M = Musual)]
+ }), list( .lmu = lmu, .lshape = lshape,
+ .emu = emu, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ ned2l.dmu2 = shape / (mymu^2)
+ ned2l.dshape2 = trigamma(shape) - 1 / shape
+ wz = matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
- ans =
- new("vglmff",
- blurb = c("2-parameter Gamma distribution",
- " (McCullagh and Nelder 1989 parameterization)\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("shape", lshape, earg = eshape), "\n",
- "Mean: mu\n",
- "Variance: (mu^2)/shape"),
- constraints = eval(substitute(expression({
+ wz[, Musual*(1:NOS)-1] = ned2l.dmu2 * dmu.deta^2
+ wz[, Musual*(1:NOS) ] = ned2l.dshape2 * dshape.deta^2
- dotzero <- .zero
- Musual <- 2
- eval(negzero.expression)
- constraints = cm.zero.vgam(constraints, x, z_Index, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- Musual <- 2
- assign("CQO.FastAlgorithm", ( .lmu == "loge" && .lshape == "loge"),
- envir = VGAM:::VGAMenv)
- if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allowable.length = 1) && .zero != -2)
- stop("argument zero = -2 is required")
-
- y = as.matrix(y)
- M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
- temp1.names =
- if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = "")
- temp2.names =
- if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = "")
- predictors.names =
- c(namesof(temp1.names, .lmu, earg = .emu, tag = FALSE),
- namesof(temp2.names, .lshape, earg = .eshape, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = Musual)]
-
-
- # Error check
- if (any(y <= 0))
- stop("all responses must be positive") # see @loglikelihood
- if (!length(etastart)) {
- init.shape = matrix(1.0, n, NOS)
- mymu = y # + 0.167 * (y == 0) # imethod == 1 (the default)
- if ( .imethod == 2) {
- for(ii in 1:ncol(y)) {
- mymu[,ii] = weighted.mean(y[,ii], w = w)
- }
- }
- for(spp in 1:NOS) {
- junk = lsfit(x, y[,spp], wt = w, intercept = FALSE)
- var.y.est = sum(w * junk$resid^2) / (n - length(junk$coef))
- init.shape[,spp] = if (length( .ishape)) .ishape else
- mymu[,spp]^2 / var.y.est
- if ( .lshape == "loglog") init.shape[init.shape[,spp] <=
- 1,spp] = 3.1 # Hopefully value is big enough
- }
- etastart = cbind(theta2eta(mymu, .lmu, earg = .emu ),
- theta2eta(init.shape, .lshape, earg = .eshape ))
- etastart = etastart[,interleave.VGAM(M, M = Musual), drop = FALSE]
- }
- }), list( .lmu = lmu, .lshape = lshape, .ishape = ishape, .zero = zero,
- .emu = emu, .eshape = eshape,
- .imethod = imethod ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual <- 2
- NOS = ncol(eta) / Musual
- eta2theta(eta[, 2*(1:NOS)-1, drop = FALSE], .lmu, earg = .emu )
- }, list( .lmu = lmu, .emu = emu ))),
- last = eval(substitute(expression({
- if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
- rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
- tmp34 = c(rep( .lmu, length = NOS),
- rep( .lshape, length = NOS))
- names(tmp34) =
- c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
- if (NOS == 1) "shape" else paste("shape", 1:NOS, sep = ""))
- tmp34 = tmp34[interleave.VGAM(M, M = 2)]
- misc$link = tmp34 # Already named
- misc$earg = vector("list", M)
- misc$Musual <- Musual
- names(misc$earg) = names(misc$link)
- for(ii in 1:NOS) {
- misc$earg[[2*ii-1]] = .emu
- misc$earg[[2*ii ]] = .eshape
- }
- misc$expected = TRUE
- }), list( .lmu = lmu, .lshape = lshape,
- .emu = emu, .eshape = eshape ))),
- linkfun = eval(substitute(function(mu, extra = NULL) {
- temp = theta2eta(mu, .lmu, earg = .emu )
- temp = cbind(temp, NA * temp)
- temp[,interleave.VGAM(ncol(temp), M = 2), drop = FALSE]
- }, list( .lmu = lmu, .emu = emu ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Musual <- 2
- NOS = ncol(eta) / Musual
- mymu = mu # eta2theta(eta[, 2*(1:NOS)-1], .lmu, earg = .emu )
- shapemat = eta2theta(eta[, 2*(1:NOS), drop = FALSE], .lshape, earg = .eshape )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgamma(x = y, shape = c(shapemat), scale = c(mymu/shapemat),
- log = TRUE))
- }
- }, list( .lmu = lmu, .lshape = lshape,
- .emu = emu, .eshape = eshape ))),
- vfamily = c("gamma2"),
- deriv = eval(substitute(expression({
- Musual <- 2
- NOS = ncol(eta) / Musual
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
- mymu = eta2theta(eta[, 2*(1:NOS)-1], .lmu, earg = .emu )
- shape = eta2theta(eta[, 2*(1:NOS)], .lshape, earg = .eshape )
+ }), list( .lmu = lmu ))))
- dl.dmu = shape * (y / mymu - 1) / mymu
- dl.dshape = log(y) + log(shape) - log(mymu) + 1 - digamma(shape) -
- y / mymu
- dmu.deta = dtheta.deta(mymu, .lmu, earg = .emu )
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape )
- myderiv = c(w) * cbind(dl.dmu * dmu.deta,
- dl.dshape * dshape.deta)
- myderiv[, interleave.VGAM(M, M = Musual)]
- }), list( .lmu = lmu, .lshape = lshape,
- .emu = emu, .eshape = eshape ))),
- weight = eval(substitute(expression({
- ed2l.dmu2 = shape / (mymu^2)
- ed2l.dshape2 = trigamma(shape) - 1 / shape
- wz = matrix(as.numeric(NA), n, M) # 2 = M; diagonal!
+ if (deviance.arg) ans at deviance = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- wz[, 2*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
- wz[, 2*(1:NOS)] = ed2l.dshape2 * dshape.deta^2
- c(w) * wz
- }), list( .lmu = lmu ))))
- if (deviance.arg) ans at deviance = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = ncol(eta) / 2
- temp300 = eta[, 2*(1:NOS), drop = FALSE]
- shape = eta2theta(temp300, .lshape, earg = .eshape )
- devi = -2 * (log(y/mu) - y/mu + 1)
- if (residuals) {
- warning("not 100% sure about these deviance residuals!")
- sign(y - mu) * sqrt(abs(devi) * w)
- } else
- sum(w * devi)
- }, list( .lshape = lshape )))
- ans
+ if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+ stop("cannot handle matrix 'w' yet")
+
+
+ Musual <- 2
+ NOS = ncol(eta) / 2
+ temp300 = eta[, 2*(1:NOS), drop = FALSE]
+ shape = eta2theta(temp300, .lshape , earg = .eshape )
+ devi = -2 * (log(y/mu) - y/mu + 1)
+ if (residuals) {
+ warning("not 100% sure about these deviance residuals!")
+ sign(y - mu) * sqrt(abs(devi) * w)
+ } else
+ sum(c(w) * devi)
+ }, list( .lshape = lshape )))
+ ans
}
- geometric = function(link = "logit", earg = list(), expected = TRUE,
- imethod = 1, iprob = NULL)
+ geometric <- function(link = "logit", expected = TRUE,
+ imethod = 1, iprob = NULL, zero = NULL)
{
if (!is.logical(expected) || length(expected) != 1)
stop("bad input for argument 'expected'")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -2650,6 +3056,12 @@ dfelix = function(x, a = 0.25, log = FALSE) {
stop("argument 'imethod' must be 1 or 2 or 3")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
new("vglmff",
blurb = c("Geometric distribution ",
"(P[Y=y] = prob * (1 - prob)^y, y = 0, 1, 2,...)\n",
@@ -2657,68 +3069,116 @@ dfelix = function(x, a = 0.25, log = FALSE) {
namesof("prob", link, earg = earg), "\n",
"Mean: mu = (1 - prob) / prob\n",
"Variance: mu * (1 + mu) = (1 - prob) / prob^2"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a 1-column matrix")
- if (any(y < 0)) stop("all responses must be >= 0")
- if (any(y!=round(y ))) stop("response should be integer-valued")
- predictors.names = namesof("prob", .link, earg = .earg, tag = FALSE)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("prob", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
+
if (!length(etastart)) {
- prob.init = if ( .imethod == 3)
+ prob.init = if ( .imethod == 2)
1 / (1 + y + 1/16) else
- if ( .imethod == 1)
- 1 / (1 + median(rep(y, w)) + 1/16) else
- 1 / (1 + weighted.mean(y, w) + 1/16)
+ if ( .imethod == 3)
+ 1 / (1 + apply(y, 2, median) + 1/16) else
+ 1 / (1 + colSums(y * w) / colSums(w) + 1/16)
+
+ if (!is.matrix(prob.init))
+ prob.init <- matrix(prob.init, n, M, byrow = TRUE)
if (length( .iprob ))
- prob.init = 0 * prob.init + .iprob
+ prob.init = matrix( .iprob , n, M, byrow = TRUE)
- etastart = theta2eta(prob.init, .link, earg = .earg )
+ etastart <- theta2eta(prob.init, .link , earg = .earg )
}
- }), list( .link = link, .earg = earg, .imethod = imethod,
- .iprob = iprob ))),
+ }), list( .link = link, .earg = earg,
+ .imethod = imethod, .iprob = iprob ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta, .link, earg = .earg )
+ prob = eta2theta(eta, .link , earg = .earg )
(1 - prob) / prob
}, list( .link = link, .earg = earg ))),
+
last = eval(substitute(expression({
- misc$link = c(prob = .link)
- misc$earg = list(prob = .earg )
- misc$expected = .expected
- misc$imethod = .imethod
- misc$iprob = .iprob
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ misc$expected <- .expected
+ misc$imethod <- .imethod
+ misc$iprob <- .iprob
}), list( .link = link, .earg = earg,
.iprob = iprob,
.expected = expected, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- prob = eta2theta(eta, .link, earg = .earg )
+ prob = eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dgeom(x = y, prob = prob, log = TRUE))
+ sum(c(w) * dgeom(x = y, prob = prob, log = TRUE))
}
}, list( .link = link, .earg = earg ))),
vfamily = c("geometric"),
deriv = eval(substitute(expression({
- prob = eta2theta(eta, .link, earg = .earg )
- dl.dprob = -y / (1-prob) + 1/prob
- dprobdeta = dtheta.deta(prob, .link, earg = .earg )
+ prob = eta2theta(eta, .link , earg = .earg )
+
+ dl.dprob = -y / (1 - prob) + 1 / prob
+
+ dprobdeta = dtheta.deta(prob, .link , earg = .earg )
c(w) * cbind(dl.dprob * dprobdeta)
}), list( .link = link, .earg = earg, .expected = expected ))),
weight = eval(substitute(expression({
- ed2l.dprob2 = if ( .expected ) {
+ ned2l.dprob2 = if ( .expected ) {
1 / (prob^2 * (1 - prob))
} else {
y / (1 - prob)^2 + 1 / prob^2
}
- wz = ed2l.dprob2 * dprobdeta^2
+ wz = ned2l.dprob2 * dprobdeta^2
if ( !( .expected ))
- wz = wz - dl.dprob * d2theta.deta2(prob, .link, earg = .earg )
+ wz = wz - dl.dprob * d2theta.deta2(prob, .link , earg = .earg )
c(w) * wz
}), list( .link = link, .earg = earg,
.expected = expected ))))
@@ -2727,33 +3187,34 @@ dfelix = function(x, a = 0.25, log = FALSE) {
-dbetageom = function(x, shape1, shape2, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+dbetageom <- function(x, shape1, shape2, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
- if (!is.Numeric(x))
- stop("bad input for argument 'x'")
- if (!is.Numeric(shape1, positive = TRUE))
- stop("bad input for argument 'shape1'")
- if (!is.Numeric(shape2, positive = TRUE))
- stop("bad input for argument 'shape2'")
- N = max(length(x), length(shape1), length(shape2))
- x = rep(x, length.out = N);
- shape1 = rep(shape1, length.out = N);
- shape2 = rep(shape2, length.out = N)
- loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
- xok = (x == round(x) & x >= 0)
- loglik[!xok] = log(0)
- if (log.arg) {
- loglik
- } else {
- exp(loglik)
- }
+ if (!is.Numeric(x))
+ stop("bad input for argument 'x'")
+ if (!is.Numeric(shape1, positive = TRUE))
+ stop("bad input for argument 'shape1'")
+ if (!is.Numeric(shape2, positive = TRUE))
+ stop("bad input for argument 'shape2'")
+ N = max(length(x), length(shape1), length(shape2))
+ x = rep(x, length.out = N);
+ shape1 = rep(shape1, length.out = N);
+ shape2 = rep(shape2, length.out = N)
+
+ loglik = lbeta(1+shape1, shape2+abs(x)) - lbeta(shape1, shape2)
+ xok = (x == round(x) & x >= 0)
+ loglik[!xok] = log(0)
+ if (log.arg) {
+ loglik
+ } else {
+ exp(loglik)
+ }
}
-pbetageom = function(q, shape1, shape2, log.p = FALSE) {
+pbetageom <- function(q, shape1, shape2, log.p = FALSE) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(shape1, positive = TRUE))
@@ -2768,23 +3229,24 @@ pbetageom = function(q, shape1, shape2, log.p = FALSE) {
if (max(abs(shape1-shape1[1])) < 1.0e-08 &&
max(abs(shape2-shape2[1])) < 1.0e-08) {
qstar = floor(q)
- temp = if (max(qstar) >= 0) dbetageom(x=0:max(qstar),
+ temp = if (max(qstar) >= 0) dbetageom(x = 0:max(qstar),
shape1 = shape1[1], shape2 = shape2[1]) else 0*qstar
unq = unique(qstar)
- for(i in unq) {
- index = qstar == i
- ans[index] = if (i >= 0) sum(temp[1:(1+i)]) else 0
+ for(ii in unq) {
+ index <- (qstar == ii)
+ ans[index] = if (ii >= 0) sum(temp[1:(1+ii)]) else 0
}
} else
for(ii in 1:N) {
qstar = floor(q[ii])
- ans[ii] = if (qstar >= 0) sum(dbetageom(x=0:qstar,
+ ans[ii] = if (qstar >= 0) sum(dbetageom(x = 0:qstar,
shape1 = shape1[ii], shape2 = shape2[ii])) else 0
}
if (log.p) log(ans) else ans
}
-rbetageom = function(n, shape1, shape2) {
+
+rbetageom <- function(n, shape1, shape2) {
rgeom(n = n, prob = rbeta(n = n, shape1 = shape1, shape2 = shape2))
}
@@ -2792,26 +3254,22 @@ rbetageom = function(n, shape1, shape2) {
-interleave.VGAM = function(L, M) c(matrix(1:L, nrow=M, byrow = TRUE))
-negbinomial.control <- function(save.weight = TRUE, ...)
-{
+negbinomial.control <- function(save.weight = TRUE, ...) {
list(save.weight = save.weight)
}
- negbinomial = function(lmu = "loge", lsize = "loge",
- emu = list(), esize = list(),
- imu = NULL, isize = NULL,
- quantile.probs = 0.75,
- nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
- deviance.arg = FALSE, imethod = 1,
- parallel = FALSE,
- shrinkage.init = 0.95, zero = -2)
-{
+ negbinomial <- function(lmu = "loge", lsize = "loge",
+ imu = NULL, isize = NULL,
+ probs.y = 0.75,
+ nsimEIM = 100, cutoff = 0.995, Maxiter = 5000,
+ deviance.arg = FALSE, imethod = 1,
+ parallel = FALSE,
+ shrinkage.init = 0.95, zero = -2) {
@@ -2824,18 +3282,16 @@ negbinomial.control <- function(save.weight = TRUE, ...)
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- lmuuu = lmu
- emuuu = emu
- imuuu = imu
+ lmuuu <- as.list(substitute(lmu))
+ emuuu <- link2list(lmuuu)
+ lmuuu <- attr(emuuu, "function.name")
+ imuuu <- imu
+ lsize <- as.list(substitute(lsize))
+ esize <- link2list(lsize)
+ lsize <- attr(esize, "function.name")
- if (!is.list(emuuu)) emuuu = list()
- if (!is.list(esize)) esize = list()
if (length(imuuu) && !is.Numeric(imuuu, positive = TRUE))
stop("bad input for argument 'imu'")
@@ -2877,6 +3333,9 @@ negbinomial.control <- function(save.weight = TRUE, ...)
ans =
new("vglmff",
+
+
+
blurb = c("Negative-binomial distribution\n\n",
"Links: ",
namesof("mu", lmuuu, earg = emuuu), ", ",
@@ -2892,7 +3351,7 @@ negbinomial.control <- function(save.weight = TRUE, ...)
if ( .parallel && ncol(cbind(y)) > 1)
stop("univariate responses needed if 'parallel = TRUE'")
- constraints = cm.vgam(matrix(1, M, 1), x, .parallel, constraints)
+ constraints = cm.vgam(matrix(1, M, 1), x, .parallel , constraints)
}), list( .parallel = parallel, .zero = zero ))),
infos = eval(substitute(function(...) {
@@ -2903,28 +3362,41 @@ negbinomial.control <- function(save.weight = TRUE, ...)
initialize = eval(substitute(expression({
Musual <- 2
+ temp5 <- w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1, maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
assign("CQO.FastAlgorithm",
( .lmuuu == "loge") && ( .lsize == "loge"),
envir = VGAM:::VGAMenv)
- if (any(function.name == c("cqo","cao")) &&
- is.Numeric( .zero, allowable.length = 1) && .zero != -2)
+
+ if (any(function.name == c("cqo", "cao")) &&
+ is.Numeric( .zero , allowable.length = 1) &&
+ .zero != -2)
stop("argument zero = -2 is required")
+
if (any(y < 0))
stop("negative values not allowed for the 'negbinomial' family")
if (any(round(y) != y))
stop("integer-values only allowed for the 'negbinomial' family")
+ if (ncol(w) > ncol(y))
+ stop("number of columns of prior-'weights' is greater than ",
+ "the number of responses")
-
- y = as.matrix(y)
M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
- predictors.names =
+ NOS = ncoly = ncol(y) # Number of species
+ predictors.names <-
c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
.lmuuu, earg = .emuuu, tag = FALSE),
namesof(if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""),
- .lsize, earg = .esize, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = Musual)]
+ .lsize , earg = .esize , tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M = Musual)]
if (is.null( .nsimEIM )) {
save.weight <- control$save.weight <- FALSE
@@ -2938,18 +3410,18 @@ negbinomial.control <- function(save.weight = TRUE, ...)
mu.init = y
for(iii in 1:ncol(y)) {
use.this = if ( .imethod == 1) {
- weighted.mean(y[, iii], w) + 1/16
+ weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .quantile.probs ) + 1/16)
+ c(quantile(y[, iii], probs = .probs.y ) + 1/16)
} else {
median(y[, iii]) + 1/16
}
if (is.numeric( .mu.init )) {
- mu.init[, iii] = MU.INIT[, iii]
+ mu.init[, iii] = MU.INIT[, iii]
} else {
medabsres = median(abs(y[, iii] - use.this)) + 1/32
- allowfun = function(z, maxtol=1) sign(z)*pmin(abs(z), maxtol)
+ allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
mu.init[, iii] = use.this + (1 - .sinit ) *
allowfun(y[, iii] - use.this, maxtol = medabsres)
@@ -2960,9 +3432,9 @@ negbinomial.control <- function(save.weight = TRUE, ...)
if ( is.Numeric( .k.init )) {
kay.init = matrix( .k.init, nrow = n, ncol = NOS, byrow = TRUE)
} else {
- negbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
+ negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
mu = extraargs
- sum(w * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+ sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
k.grid = 2^((-7):7)
k.grid = 2^(seq(-8, 8, length = 40))
@@ -2970,68 +3442,80 @@ negbinomial.control <- function(save.weight = TRUE, ...)
for(spp. in 1:NOS) {
kay.init[, spp.] = getMaxMin(k.grid,
objfun = negbinomial.Loglikfun,
- y = y[, spp.], x = x, w = w,
+ y = y[, spp.], x = x, w = w[, spp.],
extraargs = mu.init[, spp.])
}
}
- newemu = if ( .lmuuu == "nbcanlink") {
- c(list(size = kay.init), .emuuu)
- } else {
- .emuuu
+
+
+ newemu <- .emuuu
+ if ( .lmuuu == "nbcanlink") {
+ newemu$size <- kay.init
}
- etastart = cbind(theta2eta(mu.init , link = .lmuuu , earg = newemu ),
- theta2eta(kay.init, link = .lsize , earg = .esize ))
- etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+
+
+
+
+ etastart <-
+ cbind(theta2eta(mu.init , link = .lmuuu , earg = newemu ),
+ theta2eta(kay.init, link = .lsize , earg = .esize ))
+ etastart <-
+ etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
.mu.init = imu,
- .k.init = isize, .quantile.probs = quantile.probs,
+ .k.init = isize, .probs.y = probs.y,
.sinit = shrinkage.init, .nsimEIM = nsimEIM,
.zero = zero, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual <- 2
NOS = ncol(eta) / Musual
- eta.k = eta[, Musual*(1:NOS) , drop = FALSE]
+ eta.k = eta[, Musual * (1:NOS) , drop = FALSE]
kmat = eta2theta(eta.k, .lsize , earg = .esize )
- newemu = if ( .lmuuu == "nbcanlink") {
- c(list(size = kmat), .emuuu)
- } else {
- .emuuu
+
+
+
+ newemu <- .emuuu
+ if ( .lmuuu == "nbcanlink") {
+ newemu$size <- kmat
}
- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE], .lmuuu ,
+
+
+ eta2theta(eta[, Musual * (1:NOS) - 1, drop = FALSE], .lmuuu ,
earg = newemu)
}, list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize ))),
+ .emuuu = emuuu, .esize = esize))),
last = eval(substitute(expression({
if (exists("CQO.FastAlgorithm", envir = VGAM:::VGAMenv))
rm("CQO.FastAlgorithm", envir = VGAM:::VGAMenv)
temp0303 = c(rep( .lmuuu, length = NOS),
- rep( .lsize, length = NOS))
- names(temp0303) = c(if (NOS == 1) "mu" else
- paste("mu", 1:NOS, sep = ""),
- if (NOS == 1) "size" else
- paste("size", 1:NOS, sep = ""))
+ rep( .lsize , length = NOS))
+ names(temp0303) =
+ c(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
+ if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
temp0303 = temp0303[interleave.VGAM(M, M = 2)]
misc$link = temp0303 # Already named
+
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] = newemu
- misc$earg[[Musual*ii ]] = .esize
+ misc$earg[[Musual*ii-1]] = newemu
+ misc$earg[[Musual*ii ]] = .esize
}
misc$cutoff = .cutoff
misc$imethod = .imethod
misc$nsimEIM = .nsimEIM
misc$expected = TRUE
- misc$shrinkage.init = .sinit
+ misc$shrinkage.init <- .sinit
+ misc$multipleResponses <- TRUE
}), list( .lmuuu = lmuuu, .lsize = lsize,
.emuuu = emuuu, .esize = esize,
.cutoff = cutoff,
@@ -3049,12 +3533,16 @@ negbinomial.control <- function(save.weight = TRUE, ...)
eta.kayy = 0 * eta.temp + eta.kayy # Right dimension now.
- newemu = if ( .lmuuu == "nbcanlink") {
- c(list(size = eta2theta(eta.kayy, .lsize, earg = .esize)), .emuuu)
- } else {
- .emuuu
+
+
+
+ newemu <- .emuuu
+ if ( .lmuuu == "nbcanlink") {
+ newemu$size <- eta2theta(eta.kayy, .lsize , earg = .esize )
}
+
+
eta.temp = cbind(eta.temp, eta.kayy)
eta.temp[, interleave.VGAM(ncol(eta.temp), M = Musual), drop = FALSE]
}, list( .lmuuu = lmuuu, .lsize = lsize,
@@ -3074,19 +3562,24 @@ negbinomial.control <- function(save.weight = TRUE, ...)
}
kmat = eta2theta(eta.k, .lsize , earg = .esize )
- newemu = if ( .lmuuu == "nbcanlink") {
- c(list(size = kmat), .emuuu)
- } else {
- .emuuu
- }
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+
+ newemu <- .emuuu
+ if ( .lmuuu == "nbcanlink") {
+ newemu$size <- kmat
+ }
+
+
+
+
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}, list( .lsize = lsize,
- .lmuuu = lmuuu, .emuuu = emuuu, .esize = esize ))),
+ .lmuuu = lmuuu, .emuuu = emuuu, .esize = esize))),
vfamily = c("negbinomial"),
@@ -3102,20 +3595,28 @@ negbinomial.control <- function(save.weight = TRUE, ...)
}
kmat = eta2theta(eta.k, .lsize , earg = .esize )
- newemu = if ( .lmuuu == "nbcanlink") {
- c(list(size = kmat), .emuuu)
- } else {
- .emuuu
+
+
+
+ newemu <- .emuuu
+ if ( .lmuuu == "nbcanlink") {
+ newemu$size <- kmat
}
+
+
dl.dmu = y / mu - (y + kmat) / (mu + kmat)
dl.dk = digamma(y + kmat) - digamma(kmat) -
(y + kmat) / (mu + kmat) + 1 + log(kmat / (kmat + mu))
- dmu.deta = dtheta.deta(mu, .lmuuu ,
- earg = c(list(wrt.eta = 1), newemu)) # eta1
- dk.deta1 = dtheta.deta(mu, .lmuuu ,
- earg = c(list(wrt.eta = 2), newemu))
+ if ( .lmuuu == "nbcanlink")
+ newemu$wrt.eta <- 1
+ dmu.deta = dtheta.deta(mu, .lmuuu , earg = newemu) # eta1
+
+ if ( .lmuuu == "nbcanlink")
+ newemu$wrt.eta <- 2
+ dk.deta1 = dtheta.deta(mu, .lmuuu , earg = newemu) # eta2
+
dk.deta2 = dtheta.deta(kmat, .lsize , earg = .esize)
myderiv = c(w) * cbind(dl.dmu * dmu.deta,
@@ -3127,9 +3628,12 @@ negbinomial.control <- function(save.weight = TRUE, ...)
myderiv[, 1:NOS] + c(w) * dl.dk * dk.deta1
}
+
+
+
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lmuuu = lmuuu, .lsize = lsize,
- .emuuu = emuuu, .esize = esize ))),
+ .emuuu = emuuu, .esize = esize))),
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, M)
@@ -3171,13 +3675,13 @@ negbinomial.control <- function(save.weight = TRUE, ...)
} # end of else
- ed2l.dmu2 = 1 / mu - 1 / (mu + kmat)
- wz[, Musual*(1:NOS)-1] = ed2l.dmu2 * dmu.deta^2
+ ed2l.dmu2 <- 1 / mu - 1 / (mu + kmat)
+ wz[, Musual*(1:NOS) - 1] = ed2l.dmu2 * dmu.deta^2
if ( .lmuuu == "nbcanlink") {
- wz[, Musual*(1:NOS)-1] =
+ wz[, Musual*(1:NOS)-1] <-
wz[, Musual*(1:NOS)-1] + ed2l.dk2 * dk.deta1^2
wz = cbind(wz,
@@ -3186,7 +3690,9 @@ negbinomial.control <- function(save.weight = TRUE, ...)
}
- c(w) * wz
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .cutoff = cutoff,
.Maxiter = Maxiter,
.lmuuu = lmuuu,
@@ -3198,22 +3704,29 @@ negbinomial.control <- function(save.weight = TRUE, ...)
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
Musual = 2
NOS = ncol(eta) / Musual
+
+
+
+ if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+ stop("cannot handle matrix 'w' yet")
+
+
temp300 = eta[, Musual*(1:NOS), drop = FALSE]
if ( .lsize == "loge") {
bigval = 68
temp300[temp300 > bigval] = bigval
temp300[temp300 < -bigval] = -bigval
} else stop("can only handle the 'loge' link")
- kmat = eta2theta(temp300, .lsize, earg = .esize )
- devi = 2 * (y * log(ifelse(y < 1, 1, y)/mu) +
- (y+kmat) * log((mu+kmat)/(kmat+y)))
+ kmat = eta2theta(temp300, .lsize , earg = .esize )
+ devi = 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
+ (y + kmat) * log((mu + kmat) / (kmat + y)))
if (residuals) {
sign(y - mu) * sqrt(abs(devi) * w)
} else {
- sum(w * devi)
+ sum(c(w) * devi)
}
}, list( .lsize = lsize, .emuuu = emuuu,
- .esize = esize )))
+ .esize = esize)))
ans
}
@@ -3233,9 +3746,8 @@ polya.control <- function(save.weight = TRUE, ...)
polya <-
function(lprob = "logit", lsize = "loge",
- eprob = list(), esize = list(),
iprob = NULL, isize = NULL,
- quantile.probs = 0.75,
+ probs.y = 0.75,
nsimEIM = 100,
deviance.arg = FALSE, imethod = 1,
shrinkage.init = 0.95, zero = -2)
@@ -3257,22 +3769,25 @@ polya.control <- function(save.weight = TRUE, ...)
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (!is.Numeric(nsimEIM, allowable.length = 1, integer.valued = TRUE))
+ if (!is.Numeric(nsimEIM, allowable.length = 1,
+ integer.valued = TRUE))
stop("bad input for argument 'nsimEIM'")
if (nsimEIM <= 10)
warning("argument 'nsimEIM' should be an integer ",
"greater than 10, say")
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (!is.list(eprob)) eprob = list()
- if (!is.list(esize)) esize = list()
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+ lsize <- as.list(substitute(lsize))
+ esize <- link2list(lsize)
+ lsize <- attr(esize, "function.name")
- ans =
+
+
+ ans =
new("vglmff",
blurb = c("Polya (negative-binomial) distribution\n\n",
"Links: ",
@@ -3299,23 +3814,29 @@ polya.control <- function(save.weight = TRUE, ...)
stop("polya() does not work with cqo() or cao(). ",
"Try negbinomial()")
- if (any(y < 0))
- stop("negative values not allowed for the 'polya' family")
- if (any(round(y) != y))
- stop("integer-values only allowed for the 'polya' family")
- y = as.matrix(y)
- M = 2 * ncol(y)
+
+ temp5 <- w.y.check(w = w, y = y,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1, maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ M = Musual * ncol(y)
NOS = ncoly = ncol(y) # Number of species
- predictors.names =
+ predictors.names <-
c(namesof(if (NOS == 1) "prob" else
paste("prob", 1:NOS, sep = ""),
- .lprob, earg = .eprob, tag = FALSE),
+ .lprob , earg = .eprob , tag = FALSE),
namesof(if (NOS == 1) "size" else
paste("size", 1:NOS, sep = ""),
- .lsize, earg = .esize, tag = FALSE))
- predictors.names = predictors.names[interleave.VGAM(M, M = 2)]
+ .lsize , earg = .esize , tag = FALSE))
+ predictors.names <- predictors.names[interleave.VGAM(M, M = 2)]
if (is.null( .nsimEIM )) {
save.weight <- control$save.weight <- FALSE
@@ -3332,9 +3853,9 @@ polya.control <- function(save.weight = TRUE, ...)
mu.init = y
for(iii in 1:ncol(y)) {
use.this = if ( .imethod == 1) {
- weighted.mean(y[, iii], w) + 1/16
+ weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .quantile.probs) + 1/16)
+ c(quantile(y[, iii], probs = .probs.y) + 1/16)
} else {
median(y[, iii]) + 1/16
}
@@ -3343,7 +3864,7 @@ polya.control <- function(save.weight = TRUE, ...)
mu.init[, iii] = MU.INIT[, iii]
} else {
medabsres = median(abs(y[, iii] - use.this)) + 1/32
- allowfun = function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
+ allowfun <- function(z, maxtol = 1) sign(z) * pmin(abs(z), maxtol)
mu.init[, iii] = use.this + (1 - .sinit) * allowfun(y[, iii] -
use.this, maxtol = medabsres)
@@ -3356,18 +3877,18 @@ polya.control <- function(save.weight = TRUE, ...)
if ( is.Numeric( .kinit )) {
kayy.init = matrix( .kinit, nrow = n, ncol = NOS, byrow = TRUE)
} else {
- negbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
+ negbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
mu = extraargs
- sum(w * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
+ sum(c(w) * dnbinom(x = y, mu = mu, size = kmat, log = TRUE))
}
k.grid = 2^((-7):7)
k.grid = 2^(seq(-8, 8, length = 40))
kayy.init = matrix(0, nrow = n, ncol = NOS)
for(spp. in 1:NOS) {
- kayy.init[,spp.] = getMaxMin(k.grid,
+ kayy.init[, spp.] = getMaxMin(k.grid,
objfun = negbinomial.Loglikfun,
- y = y[,spp.], x = x, w = w,
- extraargs = mu.init[,spp.])
+ y = y[, spp.], x = x, w = w,
+ extraargs = mu.init[, spp.])
}
}
@@ -3375,29 +3896,31 @@ polya.control <- function(save.weight = TRUE, ...)
kayy.init / (kayy.init + mu.init)
- etastart = cbind(theta2eta(prob.init, .lprob, earg = .eprob),
- theta2eta(kayy.init, .lsize, earg = .esize))
- etastart = etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
+ etastart <-
+ cbind(theta2eta(prob.init, .lprob , earg = .eprob),
+ theta2eta(kayy.init, .lsize , earg = .esize))
+ etastart <-
+ etastart[, interleave.VGAM(M, M = Musual), drop = FALSE]
}
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.pinit = iprob, .kinit = isize,
- .quantile.probs = quantile.probs,
+ .probs.y = probs.y,
.sinit = shrinkage.init, .nsimEIM = nsimEIM, .zero = zero,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
Musual = 2
NOS = ncol(eta) / Musual
pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
- .lprob, earg = .eprob)
+ .lprob , earg = .eprob)
kmat = eta2theta(eta[, Musual*(1:NOS)- 0, drop = FALSE],
- .lsize, earg = .esize)
+ .lsize , earg = .esize)
kmat / (kmat + pmat)
}, list( .lprob = lprob, .eprob = eprob,
- .lsize = lsize, .esize = esize ))),
+ .lsize = lsize, .esize = esize))),
last = eval(substitute(expression({
- temp0303 = c(rep( .lprob, length = NOS),
- rep( .lsize, length = NOS))
+ temp0303 = c(rep( .lprob , length = NOS),
+ rep( .lsize , length = NOS))
names(temp0303) =
c(if (NOS == 1) "prob" else paste("prob", 1:NOS, sep = ""),
if (NOS == 1) "size" else paste("size", 1:NOS, sep = ""))
@@ -3407,8 +3930,8 @@ polya.control <- function(save.weight = TRUE, ...)
misc$earg = vector("list", M)
names(misc$earg) = names(misc$link)
for(ii in 1:NOS) {
- misc$earg[[2*ii-1]] = .eprob
- misc$earg[[2*ii ]] = .esize
+ misc$earg[[Musual*ii-1]] = .eprob
+ misc$earg[[Musual*ii ]] = .esize
}
misc$isize = .isize
@@ -3417,6 +3940,7 @@ polya.control <- function(save.weight = TRUE, ...)
misc$expected = TRUE
misc$shrinkage.init = .sinit
misc$Musual = 2
+ misc$multipleResponses <- TRUE
}), list( .lprob = lprob, .lsize = lsize,
.eprob = eprob, .esize = esize,
.isize = isize,
@@ -3429,17 +3953,17 @@ polya.control <- function(save.weight = TRUE, ...)
Musual = 2
NOS = ncol(eta) / Musual
pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
- .lprob, earg = .eprob)
+ .lprob , earg = .eprob)
temp300 = eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
bigval = 68
temp300 = ifelse(temp300 > bigval, bigval, temp300)
temp300 = ifelse(temp300 < -bigval, -bigval, temp300)
}
- kmat = eta2theta(temp300, .lsize, earg = .esize)
+ kmat = eta2theta(temp300, .lsize , earg = .esize)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE))
+ sum(c(w) * dnbinom(x = y, prob = pmat, size = kmat, log = TRUE))
}, list( .lsize = lsize, .lprob = lprob,
.esize = esize, .eprob = eprob ))),
vfamily = c("polya"),
@@ -3449,28 +3973,28 @@ polya.control <- function(save.weight = TRUE, ...)
M = ncol(eta)
pmat = eta2theta(eta[, Musual*(1:NOS) - 1, drop = FALSE],
- .lprob, earg = .eprob)
+ .lprob , earg = .eprob)
temp3 = eta[, Musual*(1:NOS) , drop = FALSE]
if ( .lsize == "loge") {
bigval = 68
temp3 = ifelse(temp3 > bigval, bigval, temp3)
temp3 = ifelse(temp3 < -bigval, -bigval, temp3)
}
- kmat = eta2theta(temp3, .lsize, earg = .esize)
+ kmat = eta2theta(temp3, .lsize , earg = .esize)
dl.dprob = kmat / pmat - y / (1.0 - pmat)
dl.dkayy = digamma(y + kmat) - digamma(kmat) + log(pmat)
- dprob.deta = dtheta.deta(pmat, .lprob, earg = .eprob)
- dkayy.deta = dtheta.deta(kmat, .lsize, earg = .esize)
+ dprob.deta = dtheta.deta(pmat, .lprob , earg = .eprob)
+ dkayy.deta = dtheta.deta(kmat, .lsize , earg = .esize)
dthetas.detas = cbind(dprob.deta, dkayy.deta)
dThetas.detas = dthetas.detas[, interleave.VGAM(M, M = Musual)]
myderiv = c(w) * cbind(dl.dprob, dl.dkayy) * dthetas.detas
myderiv[, interleave.VGAM(M, M = Musual)]
}), list( .lprob = lprob, .lsize = lsize,
- .eprob = eprob, .esize = esize ))),
+ .eprob = eprob, .esize = esize))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
+ wz = matrix(0.0, n, M + M - 1) # wz is 'tridiagonal'
ind1 = iam(NA, NA, M = Musual, both = TRUE, diag = TRUE)
mumat = as.matrix(mu)
@@ -3512,7 +4036,8 @@ polya.control <- function(save.weight = TRUE, ...)
} # End of for(spp.) loop
- c(w) * wz
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .nsimEIM = nsimEIM ))))
@@ -3520,8 +4045,18 @@ polya.control <- function(save.weight = TRUE, ...)
if (deviance.arg) ans at deviance = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- NOS = ncol(eta) / 2
- temp300 = eta[, 2*(1:NOS), drop = FALSE]
+ Musual = 2
+ NOS = ncol(eta) / Musual
+ temp300 = eta[, Musual*(1:NOS), drop = FALSE]
+
+
+
+
+ if (ncol(as.matrix(y)) > 1 && ncol(as.matrix(w)) > 1)
+ stop("cannot handle matrix 'w' yet")
+
+
+
if ( .lsize == "loge") {
bigval = 68
temp300[temp300 > bigval] = bigval
@@ -3529,14 +4064,14 @@ polya.control <- function(save.weight = TRUE, ...)
} else {
stop("can only handle the 'loge' link")
}
- kayy = eta2theta(temp300, .lsize, earg = .esize)
+ kayy = eta2theta(temp300, .lsize , earg = .esize)
devi = 2 * (y * log(ifelse(y < 1, 1, y) / mu) +
(y + kayy) * log((mu + kayy) / (kayy + y)))
if (residuals)
sign(y - mu) * sqrt(abs(devi) * w) else
- sum(w * devi)
+ sum(c(w) * devi)
}, list( .lsize = lsize, .eprob = eprob,
- .esize = esize )))
+ .esize = esize)))
ans
} # End of polya()
@@ -3544,47 +4079,53 @@ polya.control <- function(save.weight = TRUE, ...)
- simple.poisson = function()
+ simple.poisson <- function()
{
- new("vglmff",
- blurb = c("Poisson distribution\n\n",
- "Link: log(lambda)",
- "\n",
- "Variance: lambda"),
- deviance= function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- nz = y > 0
- devi = - (y - mu)
- devi[nz] = devi[nz] + y[nz] * log(y[nz]/mu[nz])
- if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
- 2 * sum(w * devi)
- },
- initialize=expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = "log(lambda)"
- mu = y + 0.167 * (y == 0)
- if (!length(etastart))
- etastart = log(mu)
- }),
- linkinv = function(eta, extra = NULL)
- exp(eta),
- last = expression({
- misc$link = c(lambda = "loge")
- misc$earg = list(lambda = list())
- }),
- link = function(mu, extra = NULL)
- log(mu),
- vfamily = "simple.poisson",
- deriv=expression({
- lambda = mu
- dl.dlambda = -1 + y/lambda
- dlambda.deta = dtheta.deta(theta=lambda, link = "loge", earg = list())
- c(w) * dl.dlambda * dlambda.deta
- }),
- weight = expression({
- d2l.dlambda2 = 1 / lambda
- c(w) * d2l.dlambda2 * dlambda.deta^2
- }))
+ new("vglmff",
+ blurb = c("Poisson distribution\n\n",
+ "Link: log(lambda)",
+ "\n",
+ "Variance: lambda"),
+ deviance = function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ nz <- y > 0
+ devi <- - (y - mu)
+ devi[nz] <- devi[nz] + y[nz] * log(y[nz]/mu[nz])
+ if (residuals) sign(y - mu) * sqrt(2 * abs(devi) * w) else
+ 2 * sum(c(w) * devi)
+ },
+ initialize = expression({
+ if (ncol(cbind(w)) != 1)
+ stop("prior weight must be a vector or a one-column matrix")
+
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ predictors.names <- "log(lambda)"
+
+ mu <- (weighted.mean(y, w) + y) / 2 + 1/8
+
+ if (!length(etastart))
+ etastart <- log(mu)
+ }),
+ linkinv = function(eta, extra = NULL)
+ exp(eta),
+ last = expression({
+ misc$link <- c(lambda = "loge")
+ misc$earg <- list(lambda = list())
+ }),
+ link = function(mu, extra = NULL)
+ log(mu),
+ vfamily = "simple.poisson",
+ deriv = expression({
+ lambda <- mu
+ dl.dlambda <- -1 + y/lambda
+ dlambda.deta <- dtheta.deta(theta = lambda, link = "loge")
+ c(w) * dl.dlambda * dlambda.deta
+ }),
+ weight = expression({
+ d2l.dlambda2 <- 1 / lambda
+ c(w) * d2l.dlambda2 * dlambda.deta^2
+ }))
}
@@ -3599,19 +4140,16 @@ polya.control <- function(save.weight = TRUE, ...)
- studentt <- function(ldf = "loglog", edf = list(), idf = NULL,
- tol1 = 0.1,
- imethod = 1)
+ studentt <- function(ldf = "loglog", idf = NULL,
+ tol1 = 0.1, imethod = 1)
{
- if (mode(ldf) != "character" && mode(ldf) != "name")
- ldf <- as.character(substitute(ldf))
- ldof <- ldf
- edof <- edf
+ ldof <- as.list(substitute(ldf))
+ edof <- link2list(ldof)
+ ldof <- attr(edof, "function.name")
idof <- idf
- if (!is.list(edof)) edof <- list()
if (length(idof))
if (!is.Numeric(idof) || any(idof <= 1))
@@ -3636,10 +4174,11 @@ polya.control <- function(save.weight = TRUE, ...)
tol1 = .tol1 )
}, list( .tol1 = tol1 ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names <- namesof("df", .ldof, earg = .edof, tag = FALSE)
+ w.y.check(w = w, y = y)
+
+
+ predictors.names <- namesof("df", .ldof , earg = .edof , tag = FALSE)
if (!length(etastart)) {
@@ -3662,7 +4201,7 @@ polya.control <- function(save.weight = TRUE, ...)
}), list( .ldof = ldof, .edof = edof, .idof = idof,
.tol1 = tol1, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Dof <- eta2theta(eta, .ldof, earg = .edof)
+ Dof <- eta2theta(eta, .ldof , earg = .edof )
ans <- 0 * eta
ans[Dof <= 1] <- NA
ans
@@ -3676,16 +4215,16 @@ polya.control <- function(save.weight = TRUE, ...)
.edof = edof, .imethod = imethod ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Dof <- eta2theta(eta, .ldof, earg = .edof)
+ Dof <- eta2theta(eta, .ldof , earg = .edof )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dt(x = y, df = Dof, log = TRUE))
+ sum(c(w) * dt(x = y, df = Dof, log = TRUE))
}
}, list( .ldof = ldof, .edof = edof ))),
vfamily = c("studentt"),
deriv = eval(substitute(expression({
- Dof <- eta2theta(eta, .ldof, earg = .edof)
- ddf.deta <- dtheta.deta(theta = Dof, .ldof, earg = .edof)
+ Dof <- eta2theta(eta, .ldof , earg = .edof )
+ ddf.deta <- dtheta.deta(theta = Dof, .ldof , earg = .edof )
DDS <- function(df) digamma((df + 1) / 2) - digamma(df / 2)
DDSp <- function(df) 0.5 * (trigamma((df + 1) / 2) - trigamma(df / 2))
@@ -3702,9 +4241,9 @@ polya.control <- function(save.weight = TRUE, ...)
const2[!is.finite(Dof)] <- 1 # Handles Inf
tmp6 = DDS(Dof)
- edl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
+ nedl2.dnu2 <- 0.5 * (tmp6 * (const2 * tmp6 - 2 / (Dof + 1)) - DDSp(Dof))
- wz <- c(w) * edl2.dnu2 * ddf.deta^2
+ wz <- c(w) * nedl2.dnu2 * ddf.deta^2
wz
}), list( .ldof = ldof, .edof = edof ))))
}
@@ -3737,9 +4276,9 @@ polya.control <- function(save.weight = TRUE, ...)
- studentt3 <- function(llocation = "identity", elocation = list(),
- lscale = "loge", escale = list(),
- ldf = "loglog", edf = list(),
+ studentt3 <- function(llocation = "identity",
+ lscale = "loge",
+ ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
imethod = 1,
zero = -(2:3))
@@ -3747,23 +4286,22 @@ polya.control <- function(save.weight = TRUE, ...)
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation <- as.character(substitute(llocation))
- if (!is.list(elocation)) elocation <- list()
-
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- if (!is.list(escale)) escale <- list()
+ lloc <- as.list(substitute(llocation))
+ eloc <- link2list(lloc)
+ lloc <- attr(eloc, "function.name")
- if (mode(ldf) != "character" && mode(ldf) != "name")
- ldf <- as.character(substitute(ldf))
- if (!is.list(edf)) edf <- list()
+ lsca <- as.list(substitute(lscale))
+ esca <- link2list(lsca)
+ lsca <- attr(esca, "function.name")
+ ldof <- as.list(substitute(ldf))
+ edof <- link2list(ldof)
+ ldof <- attr(edof, "function.name")
- lloc <- llocation; lsca <- lscale; ldof <- ldf
- eloc <- elocation; esca <- escale; edof <- edf
- iloc <- ilocation; isca <- iscale; idof <- idf
+ iloc <- ilocation
+ isca <- iscale
+ idof <- idf
if (!is.Numeric(imethod, allowable.length = 1,
@@ -3781,6 +4319,8 @@ polya.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(idof) || any(idof <= 1))
stop("argument 'idf' should be > 1")
+
+
new("vglmff",
blurb = c("Student t-distribution\n\n",
"Link: ",
@@ -3800,10 +4340,18 @@ polya.control <- function(save.weight = TRUE, ...)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 3
- if (ncol(cbind(w)) != 1)
- stop("prior weights must be a vector or a one-column matrix")
- y <- as.matrix(y)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
extra$Musual <- Musual
M <- Musual * ncoly #
@@ -3812,18 +4360,17 @@ polya.control <- function(save.weight = TRUE, ...)
mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
mynames3 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .lloc, earg = .eloc, tag = FALSE),
- namesof(mynames2, .lsca, earg = .esca, tag = FALSE),
- namesof(mynames3, .ldof, earg = .edof, tag = FALSE))
+ c(namesof(mynames1, .lloc , earg = .eloc , tag = FALSE),
+ namesof(mynames2, .lsca , earg = .esca , tag = FALSE),
+ namesof(mynames3, .ldof , earg = .edof , tag = FALSE))
predictors.names <-
predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
if (!length(etastart)) {
-
init.loc <- if (length( .iloc )) .iloc else {
if ( .imethod == 2) apply(y, 2, median) else
- if ( .imethod == 3) y else {
- colSums(w * y) / sum(w)
+ if ( .imethod == 3) (colMeans(y) + t(y)) / 2 else {
+ colSums(w * y) / colSums(w)
}
}
@@ -3844,11 +4391,11 @@ polya.control <- function(save.weight = TRUE, ...)
if (!is.Numeric(init.dof) || init.dof <= 1)
init.dof <- rep(3, length.out = ncoly)
- mat1 <- matrix(theta2eta(init.loc, .lloc, earg = .eloc), n, NOS,
+ mat1 <- matrix(theta2eta(init.loc, .lloc , earg = .eloc ), n, NOS,
byrow = TRUE)
- mat2 <- matrix(theta2eta(init.sca, .lsca, earg = .esca), n, NOS,
+ mat2 <- matrix(theta2eta(init.sca, .lsca , earg = .esca ), n, NOS,
byrow = TRUE)
- mat3 <- matrix(theta2eta(init.dof, .ldof, earg = .edof), n, NOS,
+ mat3 <- matrix(theta2eta(init.dof, .ldof , earg = .edof ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2, mat3)
etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
@@ -3860,8 +4407,8 @@ polya.control <- function(save.weight = TRUE, ...)
linkinv = eval(substitute(function(eta, extra = NULL) {
NOS <- extra$NOS
Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
Loc[Dof <= 1] <- NA
Loc
}, list( .lloc = lloc, .eloc = eloc,
@@ -3869,9 +4416,9 @@ polya.control <- function(save.weight = TRUE, ...)
.ldof = ldof, .edof = edof ))),
last = eval(substitute(expression({
Musual <- extra$Musual
- misc$link <- c(rep( .lloc, length = NOS),
- rep( .lsca, length = NOS),
- rep( .ldof, length = NOS))
+ misc$link <- c(rep( .lloc , length = NOS),
+ rep( .lsca , length = NOS),
+ rep( .ldof , length = NOS))
misc$link <- misc$link[interleave.VGAM(Musual * NOS, M = Musual)]
temp.names <- c(mynames1, mynames2, mynames3)
temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
@@ -3880,14 +4427,15 @@ polya.control <- function(save.weight = TRUE, ...)
misc$earg <- vector("list", Musual * NOS)
names(misc$earg) <- temp.names
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-2]] <- .eloc
- misc$earg[[Musual*ii-1]] <- .esca
- misc$earg[[Musual*ii ]] <- .edof
+ misc$earg[[Musual*ii-2]] <- .eloc
+ misc$earg[[Musual*ii-1]] <- .esca
+ misc$earg[[Musual*ii ]] <- .edof
}
misc$Musual <- Musual
misc$imethod <- .imethod
misc$expected = TRUE
+ misc$multipleResponses <- TRUE
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.ldof = ldof, .edof = edof,
@@ -3896,13 +4444,13 @@ polya.control <- function(save.weight = TRUE, ...)
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS <- extra$NOS
Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
- Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca, earg = .esca)
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca , earg = .esca )
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
zedd <- (y - Loc) / Sca
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ sum(c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
}
}, list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -3911,13 +4459,13 @@ polya.control <- function(save.weight = TRUE, ...)
deriv = eval(substitute(expression({
Musual <- extra$Musual
NOS <- extra$NOS
- Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc, earg = .eloc)
- Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca, earg = .esca)
- Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof, earg = .edof)
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-2], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-1], .lsca , earg = .esca )
+ Dof <- eta2theta(eta[, Musual*(1:NOS)-0], .ldof , earg = .edof )
- dloc.deta <- cbind(dtheta.deta(theta = Loc, .lloc, earg = .eloc))
- dsca.deta <- cbind(dtheta.deta(theta = Sca, .lsca, earg = .esca))
- ddof.deta <- cbind(dtheta.deta(theta = Dof, .ldof, earg = .edof))
+ dloc.deta <- cbind(dtheta.deta(theta = Loc, .lloc , earg = .eloc ))
+ dsca.deta <- cbind(dtheta.deta(theta = Sca, .lsca , earg = .esca ))
+ ddof.deta <- cbind(dtheta.deta(theta = Dof, .ldof , earg = .edof ))
zedd <- (y - Loc) / Sca
temp0 <- 1 / Dof
@@ -3976,7 +4524,9 @@ polya.control <- function(save.weight = TRUE, ...)
while (all(wz[, ncol(wz)] == 0))
wz <- wz[, -ncol(wz)]
- c(w) * wz
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.ldof = ldof, .edof = edof ))))
@@ -3987,29 +4537,26 @@ polya.control <- function(save.weight = TRUE, ...)
studentt2 <- function(df = Inf,
- llocation = "identity", elocation = list(),
- lscale = "loge", escale = list(),
+ llocation = "identity",
+ lscale = "loge",
ilocation = NULL, iscale = NULL,
imethod = 1,
zero = -2)
{
+ lloc <- as.list(substitute(llocation))
+ eloc <- link2list(lloc)
+ lloc <- attr(eloc, "function.name")
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation <- as.character(substitute(llocation))
+ lsca <- as.list(substitute(lscale))
+ esca <- link2list(lsca)
+ lsca <- attr(esca, "function.name")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale <- as.character(substitute(lscale))
- lloc <- llocation; lsca <- lscale
- eloc <- elocation; esca <- escale
iloc <- ilocation; isca <- iscale
doff <- df
- if (!is.list(eloc)) eloc <- list()
- if (!is.list(esca)) esca <- list()
-
if (is.finite(doff))
if (!is.Numeric(doff, positive = TRUE))
@@ -4046,19 +4593,27 @@ polya.control <- function(save.weight = TRUE, ...)
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 2
- if (ncol(cbind(w)) != 1)
- stop("prior weights must be a vector or a one-column matrix")
- y <- as.matrix(y)
- extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+ extra$NOS <- NOS <- ncoly <- ncol(y) # Number of species
extra$Musual <- Musual
M <- Musual * ncoly #
mynames1 <- paste("location", if (NOS > 1) 1:NOS else "", sep = "")
mynames2 <- paste("scale", if (NOS > 1) 1:NOS else "", sep = "")
predictors.names <-
- c(namesof(mynames1, .lloc, earg = .eloc, tag = FALSE),
- namesof(mynames2, .lsca, earg = .esca, tag = FALSE))
+ c(namesof(mynames1, .lloc , earg = .eloc , tag = FALSE),
+ namesof(mynames2, .lsca , earg = .esca , tag = FALSE))
predictors.names <-
predictors.names[interleave.VGAM(Musual * NOS, M = Musual)]
@@ -4066,8 +4621,8 @@ polya.control <- function(save.weight = TRUE, ...)
init.loc <- if (length( .iloc )) .iloc else {
if ( .imethod == 2) apply(y, 2, median) else
- if ( .imethod == 3) y else {
- colSums(w * y) / sum(w)
+ if ( .imethod == 3) (colMeans(y) + t(y)) / 2 else {
+ colSums(w * y) / colSums(w)
}
}
@@ -4075,9 +4630,9 @@ polya.control <- function(save.weight = TRUE, ...)
init.sca <- if (length( .isca )) .isca else
sdvec / 2.3
- mat1 <- matrix(theta2eta(init.loc, .lloc, earg = .eloc), n, NOS,
+ mat1 <- matrix(theta2eta(init.loc, .lloc , earg = .eloc ), n, NOS,
byrow = TRUE)
- mat2 <- matrix(theta2eta(init.sca, .lsca, earg = .esca), n, NOS,
+ mat2 <- matrix(theta2eta(init.sca, .lsca , earg = .esca ), n, NOS,
byrow = TRUE)
etastart <- cbind(mat1, mat2)
etastart <- etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
@@ -4089,7 +4644,7 @@ polya.control <- function(save.weight = TRUE, ...)
linkinv = eval(substitute(function(eta, extra = NULL) {
NOS <- extra$NOS
Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS) - 1], .lloc, earg = .eloc)
+ Loc <- eta2theta(eta[, Musual*(1:NOS) - 1], .lloc , earg = .eloc )
Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
Loc[Dof <= 1] <- NA
Loc
@@ -4098,16 +4653,16 @@ polya.control <- function(save.weight = TRUE, ...)
.doff = doff ))),
last = eval(substitute(expression({
Musual <- extra$Musual
- misc$link <- c(rep( .lloc, length = NOS),
- rep( .lsca, length = NOS))
+ misc$link <- c(rep( .lloc , length = NOS),
+ rep( .lsca , length = NOS))
temp.names <- c(mynames1, mynames2)
temp.names <- temp.names[interleave.VGAM(Musual * NOS, M = Musual)]
names(misc$link) <- temp.names
misc$earg <- vector("list", Musual * NOS)
names(misc$earg) <- temp.names
for(ii in 1:NOS) {
- misc$earg[[Musual*ii-1]] <- .eloc
- misc$earg[[Musual*ii-0]] <- .esca
+ misc$earg[[Musual*ii-1]] <- .eloc
+ misc$earg[[Musual*ii-0]] <- .esca
}
misc$Musual <- Musual
@@ -4115,6 +4670,7 @@ polya.control <- function(save.weight = TRUE, ...)
misc$df <- .doff
misc$imethod <- .imethod
misc$expected = TRUE
+ misc$multipleResponses <- TRUE
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.doff = doff,
@@ -4123,13 +4679,13 @@ polya.control <- function(save.weight = TRUE, ...)
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
NOS <- extra$NOS
Musual <- extra$Musual
- Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc, earg = .eloc)
- Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca, earg = .esca)
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca , earg = .esca )
Dof <- matrix( .doff , nrow(cbind(Loc)), NOS, byrow = TRUE)
zedd <- (y - Loc) / Sca
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
+ sum(c(w) * (dt(x = zedd, df = Dof, log = TRUE) - log(Sca)))
}
}, list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
@@ -4138,12 +4694,12 @@ polya.control <- function(save.weight = TRUE, ...)
deriv = eval(substitute(expression({
Musual <- extra$Musual
NOS <- extra$NOS
- Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc, earg = .eloc)
- Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca, earg = .esca)
+ Loc <- eta2theta(eta[, Musual*(1:NOS)-1], .lloc , earg = .eloc )
+ Sca <- eta2theta(eta[, Musual*(1:NOS)-0], .lsca , earg = .esca )
Dof <- matrix( .doff , n, NOS, byrow = TRUE)
- dlocat.deta <- dtheta.deta(theta = Loc, .lloc, earg = .eloc)
- dscale.deta <- dtheta.deta(theta = Sca, .lsca, earg = .esca)
+ dlocat.deta <- dtheta.deta(theta = Loc, .lloc , earg = .eloc )
+ dscale.deta <- dtheta.deta(theta = Sca, .lsca , earg = .esca )
zedd <- (y - Loc) / Sca
temp0 <- 1 / Dof
@@ -4174,7 +4730,8 @@ polya.control <- function(save.weight = TRUE, ...)
wz = matrix(as.numeric(NA), n, M) #2=M; diagonal!
wz[, Musual*(1:NOS) - 1] = ed2l.dlocat2 * dlocat.deta^2
wz[, Musual*(1:NOS) ] = ed2l.dscale2 * dscale.deta^2
- c(w) * wz
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lloc = lloc, .eloc = eloc,
.lsca = lsca, .esca = esca,
.doff = doff ))))
@@ -4185,56 +4742,104 @@ polya.control <- function(save.weight = TRUE, ...)
- chisq <- function(link = "loge", earg = list())
-{
- if (mode(link) != "character" && mode(link) != "name")
- link <- as.character(substitute(link))
- if (!is.list(earg)) earg <- list()
+ chisq <- function(link = "loge", zero = NULL) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
new("vglmff",
blurb = c("Chi-squared distribution\n\n",
"Link: ",
namesof("df", link, earg = earg, tag = FALSE)),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
- if (ncol(cbind(w)) != 1)
- stop("argument 'weights' must be a vector or a one-column matrix")
- y <- as.matrix(y)
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
- extra$ncoly <- NOS <- ncol(y) # Number of species
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ extra$ncoly <- NOS <- ncoly # Number of species
mynames1 <- paste("df", if (NOS > 1) 1:NOS else "", sep = "")
- predictors.names <- namesof(mynames1, .link, earg = .earg, tag = FALSE)
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
if (!length(mustart) && !length(etastart))
mustart <- y + (1 / 8) * (y == 0)
}), list( .link = link, .earg = earg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta, .link, earg = .earg )
+ eta2theta(eta, .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
+
last = eval(substitute(expression({
- misc$link <- c(df = .link)
- misc$earg <- list(df = .earg )
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
+
linkfun = eval(substitute(function(mu, extra = NULL) {
- theta2eta(mu, .link, earg = .earg )
+ theta2eta(mu, .link , earg = .earg )
}, list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- mydf <- eta2theta(eta, .link, earg = .earg )
+ mydf <- eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else
- sum(w * dchisq(x = y, df = mydf, ncp = 0, log = TRUE))
+ sum(c(w) * dchisq(x = y, df = mydf, ncp = 0, log = TRUE))
}, list( .link = link, .earg = earg ))),
vfamily = "chisq",
deriv = eval(substitute(expression({
- mydf <- eta2theta(eta, .link, earg = .earg )
+ mydf <- eta2theta(eta, .link , earg = .earg )
dl.dv <- (log(y / 2) - digamma(mydf / 2)) / 2
- dv.deta <- dtheta.deta(mydf, .link, earg = .earg )
+ dv.deta <- dtheta.deta(mydf, .link , earg = .earg )
c(w) * dl.dv * dv.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- ed2l.dv2 <- -trigamma(mydf / 2) / 4
- wz <- -ed2l.dv2 * dv.deta^2
+ ned2l.dv2 <- trigamma(mydf / 2) / 4
+ wz <- ned2l.dv2 * dv.deta^2
c(w) * wz
}), list( .link = link, .earg = earg ))))
}
@@ -4245,13 +4850,13 @@ polya.control <- function(save.weight = TRUE, ...)
-dsimplex = function(x, mu = 0.5, dispersion = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+dsimplex <- function(x, mu = 0.5, dispersion = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
sigma = dispersion
- deeFun = function(y, mu)
+ deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
logpdf = (-0.5 * log(2 * pi) - log(sigma) - 1.5 * log(x) -
1.5 * log1p(-x) - 0.5 * deeFun(x, mu) / sigma^2)
@@ -4264,7 +4869,7 @@ dsimplex = function(x, mu = 0.5, dispersion = 1, log = FALSE) {
}
-rsimplex = function(n, mu = 0.5, dispersion = 1) {
+rsimplex <- function(n, mu = 0.5, dispersion = 1) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -4319,20 +4924,25 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
- simplex = function(lmu = "logit", lsigma = "loge",
- emu = list(), esigma = list(),
- imu = NULL, isigma = NULL,
- imethod = 1, shrinkage.init = 0.95,
- zero = 2) {
+ simplex <- function(lmu = "logit", lsigma = "loge",
+ imu = NULL, isigma = NULL,
+ imethod = 1, shrinkage.init = 0.95,
+ zero = 2) {
+
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(lsigma) != "character" && mode(lsigma) != "name")
- lsigma = as.character(substitute(lsigma))
- if (!is.list(emu)) emu = list()
- if (!is.list(esigma)) esigma = list()
+
+
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ lsigma <- as.list(substitute(lsigma))
+ esigma <- link2list(lsigma)
+ lsigma <- attr(esigma, "function.name")
+
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -4343,67 +4953,78 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
new("vglmff",
- blurb = c("Univariate Simplex distribution\n\n",
+ blurb = c("Univariate simplex distribution\n\n",
"f(y) = [2*pi*sigma^2*(y*(1-y))^3]^(-0.5) * \n",
- " exp[-0.5*(y-mu)^2 / (sigma^2 * y*(1-y)*mu^2*(1-mu)^2)],\n",
+ " exp[-0.5*(y-mu)^2 / (sigma^2 * y * ",
+ "(1-y) * mu^2 * (1-mu)^2)],\n",
" 0 < y < 1, 0 < mu < 1, sigma > 0\n\n",
"Links: ",
namesof("mu", lmu, earg = emu), ", ",
namesof("sigma", lsigma, earg = esigma), "\n\n",
"Mean: mu\n",
"Variance function: V(mu) = mu^3 * (1 - mu)^3"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
initialize = eval(substitute(expression({
- y = as.numeric(y)
- if (ncol(y <- cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
if (any(y <= 0.0 | y >= 1.0))
stop("all 'y' values must be in (0,1)")
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE)
+
+
predictors.names = c(
- namesof("mu", .lmu, earg = .emu, tag = FALSE),
+ namesof("mu", .lmu , earg = .emu , tag = FALSE),
namesof("sigma", .lsigma, earg = .esigma, tag = FALSE))
- deeFun = function(y, mu)
+ deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
if (!length(etastart)) {
- use.this = if ( .imethod == 3) weighted.mean(y, w) else
- if ( .imethod == 1) median(y) else
- mean(y, trim = 0.1)
+
+ use.this =
+ if ( .imethod == 3) weighted.mean(y, w = w) else
+ if ( .imethod == 1) median(y) else
+ mean(y, trim = 0.1)
+
+
init.mu = (1 - .sinit) * y + .sinit * use.this
mu.init = rep(if (length( .imu )) .imu else init.mu, length = n)
sigma.init = if (length( .isigma )) rep( .isigma, leng = n) else {
use.this = deeFun(y, mu=init.mu)
rep(sqrt( if ( .imethod == 3) weighted.mean(use.this, w) else
if ( .imethod == 1) median(use.this) else
- mean(use.this, trim = 0.1)),
+ mean(use.this, trim = 0.1)),
length = n)
}
- etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
- theta2eta(sigma.init, .lsigma, earg = .esigma))
+ etastart <-
+ cbind(theta2eta(mu.init, .lmu , earg = .emu),
+ theta2eta(sigma.init, .lsigma, earg = .esigma))
}
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma,
.imu = imu, .isigma = isigma,
.sinit = shrinkage.init, .imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu, earg = .emu)
+ eta2theta(eta[, 1], .lmu , earg = .emu)
}, list( .lmu = lmu, .emu = emu ))),
last = eval(substitute(expression({
- misc$link = c(mu = .lmu, sigma = .lsigma)
- misc$earg = list(mu = .emu, sigma = .esigma)
- misc$imu = .imu
- misc$isigma = .isigma
- misc$imethod = .imethod
- misc$shrinkage.init = .sinit
+ misc$link <- c(mu = .lmu ,
+ sigma = .lsigma)
+ misc$earg <- list(mu = .emu ,
+ sigma = .esigma)
+ misc$imu <- .imu
+ misc$isigma <- .isigma
+ misc$imethod <- .imethod
+ misc$shrinkage.init <- .sinit
}), list( .lmu = lmu, .lsigma = lsigma,
.imu = imu, .isigma = isigma,
.emu = emu, .esigma = esigma,
@@ -4413,23 +5034,24 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE))
+ sum(c(w) * dsimplex(x = y, mu = mu, dispersion = sigma, log = TRUE))
}
}, list( .lsigma = lsigma, .emu = emu,
.esigma = esigma ))),
vfamily = c("simplex"),
deriv = eval(substitute(expression({
- deeFun = function(y, mu)
+ deeFun <- function(y, mu)
(((y - mu) / (mu * (1 - mu)))^2) / (y * (1 - y))
sigma = eta2theta(eta[, 2], .lsigma, earg = .esigma)
- dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
+ dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
dsigma.deta = dtheta.deta(sigma, .lsigma, earg = .esigma)
dl.dmu = (y - mu) * (deeFun(y, mu) +
1 / (mu * (1 - mu))^2) / (mu * (1 - mu) * sigma^2)
dl.dsigma = (deeFun(y, mu) / sigma^2 - 1) / sigma
- cbind(dl.dmu * dmu.deta, dl.dsigma * dsigma.deta)
+ cbind(dl.dmu * dmu.deta,
+ dl.dsigma * dsigma.deta)
}), list( .lmu = lmu, .lsigma = lsigma,
.emu = emu, .esigma = esigma ))),
weight = eval(substitute(expression({
@@ -4449,633 +5071,698 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
- rig = function(lmu = "identity", llambda = "loge",
- emu = list(), elambda = list(), imu = NULL, ilambda=1)
+ rig <- function(lmu = "identity", llambda = "loge",
+ imu = NULL, ilambda = 1)
{
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
if (!is.Numeric(ilambda, positive = TRUE))
stop("bad input for 'ilambda'")
- if (!is.list(emu)) emu = list()
- if (!is.list(elambda)) elambda = list()
- new("vglmff",
- blurb = c("Reciprocal inverse Gaussian distribution \n",
- "f(y) = [lambda/(2*pi*y)]^(0.5) * \n",
- " exp[-0.5*(lambda/y) * (y-mu)^2], ",
- " 0 < y,\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("lambda", llambda, earg = elambda), "\n\n",
- "Mean: mu"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- y = as.numeric(y)
- if (any(y <= 0))
- stop("all y values must be > 0")
- predictors.names =
- c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
- if (!length(etastart)) {
- mu.init = rep(if (length( .imu)) .imu else
- median(y), length = n)
- lambda.init = rep(if (length( .ilambda )) .ilambda else
- sqrt(var(y)), length = n)
- etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
- theta2eta(lambda.init, .llambda, earg = .elambda))
- }
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda,
- .imu=imu, .ilambda = ilambda ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu, earg = .emu)
- }, list( .lmu = lmu,
- .emu = emu, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$d3 = d3 # because save.weights = FALSE
- misc$link = c(mu= .lmu, lambda = .llambda)
- misc$earg = list(mu= .emu, lambda = .elambda)
- misc$pooled.weight = pooled.weight
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
- }, list( .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- vfamily = c("rig"),
- deriv = eval(substitute(expression({
- if (iter == 1) {
- d3 = deriv3(~ w *
- (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
- c("mu", "lambda"), hessian= TRUE)
- }
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- eval.d3 = eval(d3)
- dl.dthetas = attr(eval.d3, "gradient")
- dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- dtheta.detas = cbind(dmu.deta, dlambda.deta)
+ new("vglmff",
+ blurb = c("Reciprocal inverse Gaussian distribution \n",
+ "f(y) = [lambda/(2*pi*y)]^(0.5) * \n",
+ " exp[-0.5*(lambda/y) * (y-mu)^2], ",
+ " 0 < y,\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
+ "Mean: mu"),
+ initialize = eval(substitute(expression({
- dl.dthetas * dtheta.detas
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- weight = eval(substitute(expression({
- d2l.dthetas2 = attr(eval.d3, "hessian")
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = -d2l.dthetas2[, 1,1] * dtheta.detas[, 1]^2
- wz[,iam(2,2,M)] = -d2l.dthetas2[, 2,2] * dtheta.detas[, 2]^2
- wz[,iam(1,2,M)] = -d2l.dthetas2[, 1,2] * dtheta.detas[, 1] *
- dtheta.detas[, 2]
- if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
- d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[, 1] * d2mudeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[, 2] * d2lambda
- }
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
- if (intercept.only) {
- sumw = sum(w)
- for(ii in 1:ncol(wz))
- wz[,ii] = sum(wz[,ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- wz
- }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
- .emu = emu, .elambda = elambda ))))
+
+ predictors.names =
+ c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
+ if (!length(etastart)) {
+ mu.init = rep(if (length( .imu )) .imu else
+ median(y), length = n)
+ lambda.init = rep(if (length( .ilambda )) .ilambda else
+ sqrt(var(y)), length = n)
+ etastart <-
+ cbind(theta2eta(mu.init, .lmu , earg = .emu),
+ theta2eta(lambda.init, .llambda , earg = .elambda ))
+ }
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .imu = imu, .ilambda = ilambda ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .lmu , earg = .emu)
+ }, list( .lmu = lmu,
+ .emu = emu, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ misc$d3 <- d3 # because save.weights = FALSE
+ misc$link <- c(mu = .lmu , lambda = .llambda )
+ misc$earg <- list(mu = .emu , lambda = .elambda )
+ misc$pooled.weight <- pooled.weight
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2))
+ }, list( .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ vfamily = c("rig"),
+ deriv = eval(substitute(expression({
+ if (iter == 1) {
+ d3 = deriv3(~ w *
+ (-0.5*log(y) + 0.5*log(lambda) - (0.5*lambda/y) * (y-mu)^2),
+ c("mu", "lambda"), hessian= TRUE)
+ }
+
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+
+ eval.d3 = eval(d3)
+ dl.dthetas = attr(eval.d3, "gradient")
+
+ dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ dtheta.detas = cbind(dmu.deta, dlambda.deta)
+
+ dl.dthetas * dtheta.detas
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ d2l.dthetas2 = attr(eval.d3, "hessian")
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] = -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
+ wz[, iam(2, 2, M)] = -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
+ wz[, iam(1, 2, M)] = -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
+ dtheta.detas[, 2]
+ if (!.expected) {
+ d2mudeta2 = d2theta.deta2(mu, .lmu , earg = .emu)
+ d2lambda = d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
+ }
+
+ if (intercept.only) {
+ sumw = sum(w)
+ for(ii in 1:ncol(wz))
+ wz[, ii] = sum(wz[, ii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
+ .emu = emu, .elambda = elambda ))))
}
- hypersecant = function(link.theta = "elogit",
- earg = if (link.theta == "elogit") list(min = -pi/2, max = pi/2) else list(),
- init.theta = NULL)
+ hypersecant <- function(link.theta = elogit(min = -pi/2, max = pi/2),
+ init.theta = NULL)
{
- if (mode(link.theta) != "character" && mode(link.theta) != "name")
- link.theta = as.character(substitute(link.theta))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Hyperbolic Secant distribution \n",
- "f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
- " for all y,\n",
- "Link: ",
- namesof("theta", link.theta, earg = earg), "\n\n",
- "Mean: tan(theta)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("theta", .link.theta, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- theta.init = rep(if (length( .init.theta)) .init.theta else
- median(y), length = n)
- etastart = theta2eta(theta.init, .link.theta, earg = .earg )
- }
- }), list( .link.theta = link.theta, .earg = earg,
- .init.theta=init.theta ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta, earg = .earg )
- tan(theta)
- }, list( .link.theta = link.theta, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(theta = .link.theta )
- misc$earg = list(theta = .earg )
- misc$expected = TRUE
- }), list( .link.theta = link.theta, .earg = earg ))),
- loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
- }, list( .link.theta = link.theta, .earg = earg ))),
- vfamily = c("hypersecant"),
- deriv = eval(substitute(expression({
- theta = eta2theta(eta, .link.theta, earg = .earg )
- dl.dthetas = y - tan(theta)
- dparam.deta = dtheta.deta(theta, .link.theta, earg = .earg )
- c(w) * dl.dthetas * dparam.deta
- }), list( .link.theta = link.theta, .earg = earg ))),
- weight = expression({
- d2l.dthetas2 = 1 / cos(theta)^2
- wz = c(w) * d2l.dthetas2 * dparam.deta^2
- wz
- }))
+ link.theta <- as.list(substitute(link.theta))
+ earg <- link2list(link.theta)
+ link.theta <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Hyperbolic Secant distribution \n",
+ "f(y) = exp(theta*y + log(cos(theta ))) / (2*cosh(pi*y/2))\n",
+ " for all y,\n",
+ "Link: ",
+ namesof("theta", link.theta , earg = earg), "\n\n",
+ "Mean: tan(theta)"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ namesof("theta", .link.theta , earg = .earg , tag = FALSE)
+ if (!length(etastart)) {
+ theta.init = rep(if (length( .init.theta )) .init.theta else
+ median(y), length = n)
+ etastart <-
+ theta2eta(theta.init, .link.theta , earg = .earg )
+ }
+ }), list( .link.theta = link.theta , .earg = earg,
+ .init.theta = init.theta ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ tan(theta)
+ }, list( .link.theta = link.theta , .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(theta = .link.theta )
+ misc$earg <- list(theta = .earg )
+ misc$expected <- TRUE
+ }), list( .link.theta = link.theta , .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (theta*y + log(cos(theta)) - log(cosh(pi*y/2 ))))
+ }, list( .link.theta = link.theta , .earg = earg ))),
+ vfamily = c("hypersecant"),
+ deriv = eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ dl.dthetas = y - tan(theta)
+ dparam.deta = dtheta.deta(theta, .link.theta , earg = .earg )
+ c(w) * dl.dthetas * dparam.deta
+ }), list( .link.theta = link.theta , .earg = earg ))),
+ weight = expression({
+ d2l.dthetas2 = 1 / cos(theta)^2
+ wz = c(w) * d2l.dthetas2 * dparam.deta^2
+ wz
+ }))
}
- hypersecant.1 = function(link.theta = "elogit",
- earg = if (link.theta == "elogit") list(min = -pi/2, max = pi/2) else
- list(),
- init.theta = NULL)
+ hypersecant.1 <- function(link.theta = elogit(min = -pi/2, max = pi/2),
+ init.theta = NULL)
{
- if (mode(link.theta) != "character" && mode(link.theta) != "name")
- link.theta = as.character(substitute(link.theta))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Hyperbolic Secant distribution \n",
- "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
- " (1-y)^(-0.5-theta/pi), ",
- " 0 < y < 1,\n",
- "Link: ",
- namesof("theta", link.theta, earg = earg), "\n\n",
- "Mean: 0.5 + theta/pi", "\n",
- "Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- y = as.numeric(y)
- if (any(y <= 0 | y >= 1))
- stop("all y values must be in (0,1)")
- predictors.names = namesof("theta", .link.theta, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- theta.init = rep(if (length( .init.theta)) .init.theta else
- median(y), length = n)
+ link.theta <- as.list(substitute(link.theta))
+ earg <- link2list(link.theta)
+ link.theta <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Hyperbolic Secant distribution \n",
+ "f(y) = (cos(theta)/pi) * y^(-0.5+theta/pi) * \n",
+ " (1-y)^(-0.5-theta/pi), ",
+ " 0 < y < 1,\n",
+ "Link: ",
+ namesof("theta", link.theta , earg = earg), "\n\n",
+ "Mean: 0.5 + theta/pi", "\n",
+ "Variance: (pi^2 - 4*theta^2) / (8*pi^2)"),
+ initialize = eval(substitute(expression({
+ if (any(y <= 0 | y >= 1))
+ stop("all response 'y' values must be in (0,1)")
+
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ namesof("theta", .link.theta , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ theta.init = rep(if (length( .init.theta )) .init.theta else
+ median(y), length = n)
+
+ etastart <-
+ theta2eta(theta.init, .link.theta , earg = .earg )
+ }
+ }), list( .link.theta = link.theta , .earg = earg,
+ .init.theta = init.theta ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ 0.5 + theta / pi
+ }, list( .link.theta = link.theta , .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(theta = .link.theta )
+ misc$earg <- list(theta = .earg )
+ misc$expected <- TRUE
+ }), list( .link.theta = link.theta , .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
+ (-0.5-theta/pi)*log1p(-y )))
+ }, list( .link.theta = link.theta , .earg = earg ))),
+ vfamily = c("hypersecant.1"),
+ deriv = eval(substitute(expression({
+ theta = eta2theta(eta, .link.theta , earg = .earg )
+ dl.dthetas = -tan(theta) + log(y/(1-y)) / pi
+ dparam.deta = dtheta.deta(theta, .link.theta , earg = .earg )
+ c(w) * dl.dthetas * dparam.deta
+ }), list( .link.theta = link.theta , .earg = earg ))),
+ weight = expression({
+ d2l.dthetas2 = 1 / cos(theta)^2
+ wz = c(w) * d2l.dthetas2 * dparam.deta^2
+ wz
+ }))
+}
+
+
+
+ leipnik <- function(lmu = "logit", llambda = "loge",
+ imu = NULL, ilambda = NULL)
+{
+
+
+
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+
+ if (is.Numeric(ilambda) && any(ilambda <= -1))
+ stop("argument 'ilambda' must be > -1")
+
+
+
+ new("vglmff",
+ blurb = c("Leipnik's distribution \n",
+ "f(y) = (y(1-y))^(-1/2) * [1 + (y-mu)^2 / (y*(1-y))]^(-lambda/2) /\n",
+ " Beta[(lambda+1)/2, 1/2], ",
+ " 0 < y < 1, lambda > -1\n",
+ "Links: ",
+ namesof("mu", lmu, earg = emu), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n\n",
+ "Mean: mu\n",
+ "Variance: mu*(1-mu)"),
+ initialize = eval(substitute(expression({
+ if (any(y <= 0 | y >= 1))
+ stop("all response 'y' values must be in (0,1)")
+
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ c(namesof("mu", .lmu , earg = .emu , tag = FALSE),
+ namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
+
+ if (!length(etastart)) {
+ mu.init = rep(if (length( .imu )) .imu else
+ (y), length = n)
+ lambda.init = rep(if (length( .ilambda )) .ilambda else
+ 1/var(y), length = n)
+ etastart <-
+ cbind(theta2eta(mu.init, .lmu , earg = .emu ),
+ theta2eta(lambda.init, .llambda , earg = .elambda ))
+ }
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda,
+ .imu = imu, .ilambda = ilambda ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .lmu , earg = .emu)
+ }, list( .lmu = lmu,
+ .emu = emu, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ misc$link <- c(mu = .lmu , lambda = .llambda )
+ misc$earg <- list(mu = .emu , lambda = .elambda )
+
+ misc$pooled.weight <- pooled.weight
+ misc$expected = FALSE
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (-0.5*log(y*(1-y)) - 0.5 * lambda *
+ log1p((y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
+ lgamma(1+ lambda/2 )))
+ }, list( .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ vfamily = c("leipnik"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ dl.dthetas =
+ cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
+ dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
+ 0.5*digamma((lambda+1)/2) +
+ 0.5*digamma(1+lambda/2))
+
+ dmu.deta = dtheta.deta(mu, .lmu , earg = .emu)
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ dtheta.detas = cbind(dmu.deta, dlambda.deta)
+
+ c(w) * dl.dthetas * dtheta.detas
+ }), list( .lmu = lmu, .llambda = llambda,
+ .emu = emu, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ denominator = y*(1-y) + (y-mu)^2
+ d2l.dthetas2 = array(NA, c(n, 2, 2))
+ d2l.dthetas2[, 1, 1] = c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
+ d2l.dthetas2[, 1, 2] =
+ d2l.dthetas2[, 2, 1] = c(w) * (y-mu) / denominator
+ d2l.dthetas2[, 2, 2] = c(w) * (-0.25*trigamma((lambda+1)/2) +
+ 0.25*trigamma(1+lambda/2))
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] = -d2l.dthetas2[, 1, 1] * dtheta.detas[, 1]^2
+ wz[, iam(2, 2, M)] = -d2l.dthetas2[, 2, 2] * dtheta.detas[, 2]^2
+ wz[, iam(1, 2, M)] = -d2l.dthetas2[, 1, 2] * dtheta.detas[, 1] *
+ dtheta.detas[, 2]
+ if (!.expected) {
+ d2mudeta2 = d2theta.deta2(mu, .lmu , earg = .emu)
+ d2lambda = d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dthetas[, 1] * d2mudeta2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dthetas[, 2] * d2lambda
+ }
+
+ if (intercept.only) {
+ sumw <- sum(w)
+ for(ii in 1:ncol(wz))
+ wz[, ii] <- sum(wz[, ii]) / sumw
+ pooled.weight <- TRUE
+ wz <- c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+
+ wz
+ }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
+ .emu = emu, .elambda = elambda ))))
+}
+
+
+
+
+
+ invbinomial <- function(lrho = elogit(min = 0.5, max = 1),
+ llambda = "loge",
+ irho = NULL,
+ ilambda = NULL,
+ zero = NULL)
+{
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ lrho <- as.list(substitute(lrho))
+ erho <- link2list(lrho)
+ lrho <- attr(erho, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Inverse binomial distribution\n\n",
+ "Links: ",
+ namesof("rho", lrho, earg = erho), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: lambda*(1-rho)/(2*rho-1)\n",
+ "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
- etastart = theta2eta(theta.init, .link.theta, earg = .earg )
- }
- }), list( .link.theta = link.theta, .earg = earg,
- .init.theta=init.theta ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta, earg = .earg )
- 0.5 + theta/pi
- }, list( .link.theta = link.theta, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(theta = .link.theta)
- misc$earg = list(theta = .earg )
- misc$expected = TRUE
- }), list( .link.theta = link.theta, .earg = earg ))),
- loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- theta = eta2theta(eta, .link.theta, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (log(cos(theta)) + (-0.5+theta/pi)*log(y) +
- (-0.5-theta/pi)*log1p(-y )))
- }, list( .link.theta = link.theta, .earg = earg ))),
- vfamily = c("hypersecant.1"),
- deriv = eval(substitute(expression({
- theta = eta2theta(eta, .link.theta, earg = .earg )
- dl.dthetas = -tan(theta) + log(y/(1-y)) / pi
- dparam.deta = dtheta.deta(theta, .link.theta, earg = .earg )
- c(w) * dl.dthetas * dparam.deta
- }), list( .link.theta = link.theta, .earg = earg ))),
- weight = expression({
- d2l.dthetas2 = 1 / cos(theta)^2
- wz = c(w) * d2l.dthetas2 * dparam.deta^2
- wz
- }))
-}
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
- leipnik = function(lmu = "logit", llambda = "loge",
- emu = list(), elambda = list(), imu = NULL, ilambda = NULL)
-{
+ predictors.names <-
+ c(namesof("rho", .lrho, earg = .erho, tag = FALSE),
+ namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
+ if (!length(etastart)) {
+ covarn = sd(c(y))^2 / weighted.mean(y, w)
+ temp1 = 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
+ temp2 = 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
+ init.rho = rep(if (length( .irho)) .irho else {
+ ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2)
+ }, length = n)
+ init.lambda = rep(if (length( .ilambda)) .ilambda else {
+ (2*init.rho-1) * weighted.mean(y, w) / (1-init.rho)
+ }, length = n)
+ etastart <-
+ cbind(theta2eta(init.rho, .lrho, earg = .erho),
+ theta2eta(init.lambda, .llambda , earg = .elambda ))
+ }
+ }), list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho,
+ .ilambda = ilambda, .irho = irho ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
+ ifelse(rho > 0.5, lambda*(1-rho)/(2*rho-1), NA)
+ }, list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho ))),
+ last = eval(substitute(expression({
+ misc$link <- c(rho= .lrho, lambda = .llambda )
+ misc$earg <- list(rho= .erho, lambda = .elambda )
+ misc$pooled.weight <- pooled.weight
+ }), list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w) * (log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
+ lgamma(y+lambda+1) + y*log(rho) + y*log1p(-rho) +
+ lambda*log(rho)))
+ }, list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho ))),
+ vfamily = c("invbinomial"),
+ deriv = eval(substitute(expression({
+ rho = eta2theta(eta[, 1], .lrho, earg = .erho)
+ lambda = eta2theta(eta[, 2], .llambda , earg = .elambda )
- if (is.Numeric(ilambda) && any(ilambda <= -1))
- stop("ilambda must be > -1")
+ dl.drho = (y + lambda)/rho - y/(1-rho)
+ dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
+ log(rho)
- if (!is.list(emu)) emu = list()
- if (!is.list(elambda)) elambda = list()
+ drho.deta = dtheta.deta(rho, .lrho, earg = .erho)
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
- new("vglmff",
- blurb = c("Leipnik's distribution \n",
- "f(y) = (y(1-y))^(-1/2) * [1 + (y-mu)^2 / (y*(1-y))]^(-lambda/2) /\n",
- " Beta[(lambda+1)/2, 1/2], ",
- " 0 < y < 1, lambda > -1\n",
- "Links: ",
- namesof("mu", lmu, earg = emu), ", ",
- namesof("lambda", llambda, earg = elambda), "\n\n",
- "Mean: mu\n",
- "Variance: mu*(1-mu)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- y = as.numeric(y)
- if (any(y <= 0 | y >= 1))
- stop("all y values must be in (0,1)")
- predictors.names =
- c(namesof("mu", .lmu, earg = .emu, tag = FALSE),
- namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
- if (!length(etastart)) {
- mu.init = rep(if (length( .imu)) .imu else
- (y), length = n)
- lambda.init = rep(if (length( .ilambda)) .ilambda else
- 1/var(y), length = n)
- etastart = cbind(theta2eta(mu.init, .lmu, earg = .emu),
- theta2eta(lambda.init, .llambda, earg = .elambda))
- }
- }), list( .lmu = lmu, .llambda = llambda, .imu=imu, .ilambda = ilambda,
- .emu = emu, .elambda = elambda ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .lmu, earg = .emu)
- }, list( .lmu = lmu,
- .emu = emu, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$link = c(mu= .lmu, lambda = .llambda)
- misc$earg = list(mu= .emu, lambda = .elambda)
- misc$pooled.weight = pooled.weight
- misc$expected = FALSE
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w * (-0.5*log(y*(1-y)) - 0.5 * lambda *
- log1p((y-mu)^2 / (y*(1-y ))) - lgamma((lambda+1)/2) +
- lgamma(1+ lambda/2 )))
- }, list( .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- vfamily = c("leipnik"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- dl.dthetas =
- c(w) * cbind(dl.dmu = lambda*(y-mu) / (y*(1-y)+(y-mu)^2),
- dl.dlambda= -0.5 * log1p((y-mu)^2 / (y*(1-y))) -
- 0.5*digamma((lambda+1)/2) +
- 0.5*digamma(1+lambda/2))
- dmu.deta = dtheta.deta(mu, .lmu, earg = .emu)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- dtheta.detas = cbind(dmu.deta, dlambda.deta)
- dl.dthetas * dtheta.detas
- }), list( .lmu = lmu, .llambda = llambda,
- .emu = emu, .elambda = elambda ))),
- weight = eval(substitute(expression({
- if (is.R()) {
- denominator = y*(1-y) + (y-mu)^2
- d2l.dthetas2 = array(NA, c(n,2,2))
- d2l.dthetas2[, 1,1] = c(w) * lambda*(-y*(1-y)+(y-mu)^2)/denominator^2
- d2l.dthetas2[, 1,2] =
- d2l.dthetas2[, 2,1] = c(w) * (y-mu) / denominator
- d2l.dthetas2[, 2,2] = c(w) * (-0.25*trigamma((lambda+1)/2) +
- 0.25*trigamma(1+lambda/2))
- } else {
- d2l.dthetas2 = attr(eval.d3, "hessian")
- }
+ c(w) * cbind(dl.drho * drho.deta,
+ dl.dlambda * dlambda.deta )
+ }), list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho ))),
+ weight = eval(substitute(expression({
+ ed2l.drho2 = (mu+lambda) / rho^2 + mu / (1-rho)^2
+ d2l.dlambda2 = 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
+ ed2l.dlambdarho = -1/rho
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = -d2l.dthetas2[, 1,1] * dtheta.detas[, 1]^2
- wz[,iam(2,2,M)] = -d2l.dthetas2[, 2,2] * dtheta.detas[, 2]^2
- wz[,iam(1,2,M)] = -d2l.dthetas2[, 1,2] * dtheta.detas[, 1] *
- dtheta.detas[, 2]
- if (!.expected) {
- d2mudeta2 = d2theta.deta2(mu, .lmu, earg = .emu)
- d2lambda = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dthetas[, 1] * d2mudeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dthetas[, 2] * d2lambda
- }
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.drho2 * drho.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dlambdarho * dlambda.deta * drho.deta
+ wz[, iam(2, 2, M)] = d2l.dlambda2 * dlambda.deta^2
- if (intercept.only) {
- sumw = sum(w)
- for(ii in 1:ncol(wz))
- wz[,ii] = sum(wz[,ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
+ d2rhodeta2 = d2theta.deta2(rho, .lrho, earg = .erho)
+ d2lambda.deta2 = d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz = c(w) * wz
- wz
- }), list( .lmu = lmu, .llambda = llambda, .expected = FALSE,
- .emu = emu, .elambda = elambda ))))
-}
+ if (intercept.only) {
+ pooled.weight = TRUE
+
+ wz[, iam(2, 2, M)] = sum(wz[, iam(2, 2, M)]) / sum(w)
+ } else {
+ pooled.weight = FALSE
+ }
+ wz
+ }), list( .llambda = llambda, .lrho = lrho,
+ .elambda = elambda, .erho = erho ))))
+}
- invbinomial = function(lrho = "elogit", llambda = "loge",
- erho=if (lrho == "elogit") list(min = 0.5, max = 1) else list(),
- elambda = list(),
- irho = NULL,
- ilambda = NULL,
- zero = NULL)
+ genpoisson <- function(llambda = elogit(min = -1, max = 1),
+ ltheta = "loge",
+ ilambda = NULL, itheta = NULL,
+ use.approx = TRUE,
+ imethod = 1, zero = 1)
{
- if (mode(lrho) != "character" && mode(lrho) != "name")
- lrho = as.character(substitute(lrho))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
- if (!is.list(erho)) erho = list()
- if (!is.list(elambda)) elambda = list()
+ ltheta <- as.list(substitute(ltheta))
+ etheta <- link2list(ltheta)
+ ltheta <- attr(etheta, "function.name")
- new("vglmff",
- blurb = c("Inverse binomial distribution\n\n",
- "Links: ",
- namesof("rho", lrho, earg = erho), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: lambda*(1-rho)/(2*rho-1)\n",
- "Variance: lambda*rho*(1-rho)/(2*rho-1)^3\n"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("rho", .lrho, earg = .erho, tag = FALSE),
- namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (!length(etastart)) {
- covarn = sd(y)^2 / weighted.mean(y, w)
- temp1 = 0.5 + (1 + sqrt(1+8*covarn)) / (8*covarn)
- temp2 = 0.5 + (1 - sqrt(1+8*covarn)) / (8*covarn)
- init.rho = rep(if (length( .irho)) .irho else {
- ifelse(temp1 > 0.5 && temp1 < 1, temp1, temp2)
- }, length = n)
- init.lambda = rep(if (length( .ilambda)) .ilambda else {
- (2*init.rho-1) * weighted.mean(y, w) / (1-init.rho)
- }, length = n)
- etastart = cbind(theta2eta(init.rho, .lrho, earg = .erho),
- theta2eta(init.lambda, .llambda, earg = .elambda))
- }
- }), list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho,
- .ilambda = ilambda, .irho=irho ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- ifelse(rho > 0.5, lambda*(1-rho)/(2*rho-1), NA)
- }, list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho ))),
- last = eval(substitute(expression({
- misc$link = c(rho= .lrho, lambda = .llambda)
- misc$earg = list(rho= .erho, lambda = .elambda)
- misc$pooled.weight = pooled.weight
- }), list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w*(log(lambda) - lgamma(2*y+lambda) - lgamma(y+1) -
- lgamma(y+lambda+1) + y*log(rho) + y*log1p(-rho) +
- lambda*log(rho)))
- }, list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho ))),
- vfamily = c("invbinomial"),
- deriv = eval(substitute(expression({
- rho = eta2theta(eta[, 1], .lrho, earg = .erho)
- lambda = eta2theta(eta[, 2], .llambda, earg = .elambda)
- dl.drho = (y + lambda)/rho - y/(1-rho)
- dl.dlambda = 1/lambda - digamma(2*y+lambda) - digamma(y+lambda+1) +
- log(rho)
- drho.deta = dtheta.deta(rho, .lrho, earg = .erho)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- c(w) * cbind(dl.drho * drho.deta,
- dl.dlambda * dlambda.deta )
- }), list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho ))),
- weight = eval(substitute(expression({
- ed2l.drho2 = (mu+lambda) / rho^2 + mu / (1-rho)^2
- d2l.dlambda2 = 1/(lambda^2) + trigamma(2*y+lambda)+trigamma(y+lambda+1)
- ed2l.dlambdarho = -1/rho
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.drho2 * drho.deta^2
- wz[,iam(1,2,M)] = ed2l.dlambdarho * dlambda.deta * drho.deta
- wz[,iam(2,2,M)] = d2l.dlambda2 * dlambda.deta^2
-
- d2rhodeta2 = d2theta.deta2(rho, .lrho, earg = .erho)
- d2lambda.deta2 = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz = c(w) * wz
- if (intercept.only) {
- pooled.weight = TRUE
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
- wz[,iam(2,2,M)] = sum(wz[,iam(2,2,M)]) / sum(w)
+ if (!is.logical(use.approx) || length(use.approx) != 1)
+ stop("'use.approx' must be logical value")
- } else
- pooled.weight = FALSE
- wz
- }), list( .llambda = llambda, .lrho=lrho,
- .elambda = elambda, .erho=erho ))))
-}
+ new("vglmff",
+ blurb = c("Generalized Poisson distribution\n\n",
+ "Links: ",
+ namesof("lambda", llambda, earg = elambda), ", ",
+ namesof("theta", ltheta, earg = etheta), "\n",
+ "Mean: theta / (1-lambda)\n",
+ "Variance: theta / (1-lambda)^3"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
- genpoisson = function(llambda = "elogit", ltheta = "loge",
- elambda = if (llambda == "elogit") list(min = -1, max = 1) else list(),
- etheta = list(),
- ilambda = NULL, itheta = NULL,
- use.approx = TRUE,
- imethod = 1, zero = 1)
-{
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (mode(ltheta) != "character" && mode(ltheta) != "name")
- ltheta = as.character(substitute(ltheta))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(elambda)) elambda = list()
- if (!is.list(etheta)) etheta = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (!is.logical(use.approx) || length(use.approx) != 1)
- stop("'use.approx' must be logical value")
+ predictors.names <-
+ c(namesof("lambda", .llambda , earg = .elambda , tag = FALSE),
+ namesof("theta", .ltheta , earg = .etheta, tag = FALSE))
+ init.lambda = if ( .imethod == 1)
+ 1 - sqrt(weighted.mean(y, w) / var(y)) else 0.5
+ init.theta = if ( .imethod == 1)
+ sqrt((0.01 + weighted.mean(y, w)^3) / var(y)) else
+ median(y) * (1-init.lambda)
+ if (init.theta <= 0)
+ init.theta = 0.1
+ cutpt = if (init.lambda < 0) {
+ mmm = max(trunc(-init.theta / init.lambda), 4)
+ max(-1, -init.theta /mmm)
+ } else -1
+ if (init.lambda <= cutpt)
+ init.lambda = cutpt + 0.1
+ if (init.lambda >= 1)
+ init.lambda = 0.9
+ if (!length(etastart)) {
+ lambda = rep(if (length( .ilambda)) .ilambda else
+ init.lambda, length = n)
+ theta = rep(if (length( .itheta)) .itheta else init.theta ,
+ length = n)
+ etastart <-
+ cbind(theta2eta(lambda, .llambda , earg = .elambda ),
+ theta2eta(theta, .ltheta , earg = .etheta ))
+ }
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda,
+ .imethod = imethod,
+ .itheta = itheta, .ilambda = ilambda )) ),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ theta / (1 - lambda)
+ }, list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ misc$link <- c(lambda = .llambda , theta = .ltheta )
+ misc$earg <- list(lambda = .elambda , theta = .etheta )
+ if (! .use.approx )
+ misc$pooled.weight <- pooled.weight
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .use.approx = use.approx,
+ .etheta = etheta, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ index = (y == 0)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(w[index] * (-theta[index])) +
+ sum(w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
+ (y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
+ log(theta[!index]) - lgamma(y[!index]+1)) )
+ }, list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
+ vfamily = c("genpoisson"),
+ deriv = eval(substitute(expression({
+ lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
+ theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
+ dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
+ dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
+ dTHETA.deta = dtheta.deta(theta, .ltheta , earg = .etheta )
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ c(w) * cbind(dl.dlambda * dlambda.deta,
+ dl.dtheta * dTHETA.deta )
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .etheta = etheta, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
+ if ( .use.approx ) {
+ BBB = (theta+2)*(theta+2*lambda-theta*lambda)-(theta^2)*(1-lambda)
+ d2l.dlambda2 = 2 * theta * (theta+2) / ((1-lambda) * BBB)
+ d2l.dtheta2 = 2 * (1 + lambda * (2/theta - 1)) / BBB
+ d2l.dthetalambda = 2 * theta / BBB
+ wz[, iam(1, 1, M)] = d2l.dlambda2 * dlambda.deta^2
+ wz[, iam(2, 2, M)] = d2l.dtheta2 * dTHETA.deta^2
+ wz[, iam(1, 2, M)] = d2l.dthetalambda * dTHETA.deta * dlambda.deta
+ wz = c(w) * wz
+ } else {
+ d2l.dlambda2 = -y^2 * (y-1) / (theta+y*lambda)^2
+ d2l.dtheta2 = -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
+ d2l.dthetalambda = -y * (y-1) / (theta+y*lambda)^2
+ wz[, iam(1, 1, M)] = -d2l.dlambda2 * dlambda.deta^2
+ wz[, iam(2, 2, M)] = -d2l.dtheta2 * dTHETA.deta^2
+ wz[, iam(1, 2, M)] = -d2l.dthetalambda * dTHETA.deta * dlambda.deta
+
+ d2THETA.deta2 = d2theta.deta2(theta, .ltheta , earg = .etheta )
+ d2lambdadeta2 = d2theta.deta2(lambda, .llambda , earg = .elambda )
+ wz[, iam(1, 1, M)] = wz[, iam(1, 1, M)] - dl.dlambda * d2lambdadeta2
+ wz[, iam(2, 2, M)] = wz[, iam(2, 2, M)] - dl.dtheta * d2THETA.deta2
+ wz = c(w) * wz
- new("vglmff",
- blurb = c("Generalized Poisson distribution\n\n",
- "Links: ",
- namesof("lambda", llambda, earg = elambda), ", ",
- namesof("theta", ltheta, earg = etheta), "\n",
- "Mean: theta / (1-lambda)\n",
- "Variance: theta / (1-lambda)^3"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("lambda", .llambda, earg = .elambda, tag = FALSE),
- namesof("theta", .ltheta, earg = .etheta, tag = FALSE))
- init.lambda = if ( .imethod == 1)
- 1 - sqrt(weighted.mean(y, w) / var(y)) else 0.5
- init.theta = if ( .imethod == 1)
- sqrt((0.01 + weighted.mean(y, w)^3) / var(y)) else
- median(y) * (1-init.lambda)
- if (init.theta <= 0)
- init.theta = 0.1
- cutpt = if (init.lambda < 0) {
- mmm = max(trunc(-init.theta / init.lambda), 4)
- max(-1, -init.theta /mmm)
- } else -1
- if (init.lambda <= cutpt)
- init.lambda = cutpt + 0.1
- if (init.lambda >= 1)
- init.lambda = 0.9
- if (!length(etastart)) {
- lambda = rep(if (length( .ilambda)) .ilambda else
- init.lambda, length = n)
- theta = rep(if (length( .itheta)) .itheta else init.theta,
- length = n)
- etastart = cbind(theta2eta(lambda, .llambda, earg = .elambda),
- theta2eta(theta, .ltheta, earg = .etheta))
+ if (intercept.only) {
+ sumw = sum(w)
+ for(ii in 1:ncol(wz))
+ wz[, ii] = sum(wz[, ii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
}
- }), list( .ltheta = ltheta, .llambda = llambda,
- .etheta = etheta, .elambda = elambda,
- .imethod = imethod,
- .itheta = itheta, .ilambda = ilambda )) ),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- theta = eta2theta(eta[, 2], .ltheta, earg = .etheta)
- theta / (1 - lambda)
- }, list( .ltheta = ltheta, .llambda = llambda,
- .etheta = etheta, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$link = c(lambda = .llambda , theta = .ltheta )
- misc$earg = list(lambda = .elambda , theta = .etheta )
- if (! .use.approx )
- misc$pooled.weight = pooled.weight
- }), list( .ltheta = ltheta, .llambda = llambda,
- .use.approx = use.approx,
- .etheta = etheta, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- lambda = eta2theta(eta[, 1], .llambda , earg = .elambda )
- theta = eta2theta(eta[, 2], .ltheta , earg = .etheta )
- index = (y == 0)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w[index] * (-theta[index])) +
- sum(w[!index] * (-y[!index]*lambda[!index]-theta[!index] +
- (y[!index]-1)*log(theta[!index]+y[!index]*lambda[!index]) +
- log(theta[!index]) - lgamma(y[!index]+1)) )
- }, list( .ltheta = ltheta, .llambda = llambda,
- .etheta = etheta, .elambda = elambda ))),
- vfamily = c("genpoisson"),
- deriv = eval(substitute(expression({
- lambda = eta2theta(eta[, 1], .llambda, earg = .elambda)
- theta = eta2theta(eta[, 2], .ltheta, earg = .etheta)
- dl.dlambda = -y + y*(y-1)/(theta+y*lambda)
- dl.dtheta = -1 + (y-1)/(theta+y*lambda) + 1/theta
- dTHETA.deta = dtheta.deta(theta, .ltheta, earg = .etheta)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- c(w) * cbind(dl.dlambda * dlambda.deta,
- dl.dtheta * dTHETA.deta )
- }), list( .ltheta = ltheta, .llambda = llambda,
- .etheta = etheta, .elambda = elambda ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- if ( .use.approx ) {
- BBB = (theta+2)*(theta+2*lambda-theta*lambda)-(theta^2)*(1-lambda)
- d2l.dlambda2 = 2 * theta * (theta+2) / ((1-lambda) * BBB)
- d2l.dtheta2 = 2 * (1 + lambda * (2/theta - 1)) / BBB
- d2l.dthetalambda = 2 * theta / BBB
- wz[,iam(1,1,M)] = d2l.dlambda2 * dlambda.deta^2
- wz[,iam(2,2,M)] = d2l.dtheta2 * dTHETA.deta^2
- wz[,iam(1,2,M)] = d2l.dthetalambda * dTHETA.deta * dlambda.deta
- wz = c(w) * wz
- } else {
- d2l.dlambda2 = -y^2 * (y-1) / (theta+y*lambda)^2
- d2l.dtheta2 = -(y-1)/(theta+y*lambda)^2 - 1 / theta^2
- d2l.dthetalambda = -y * (y-1) / (theta+y*lambda)^2
- wz[,iam(1,1,M)] = -d2l.dlambda2 * dlambda.deta^2
- wz[,iam(2,2,M)] = -d2l.dtheta2 * dTHETA.deta^2
- wz[,iam(1,2,M)] = -d2l.dthetalambda * dTHETA.deta * dlambda.deta
-
- d2THETA.deta2 = d2theta.deta2(theta, .ltheta, earg = .etheta)
- d2lambdadeta2 = d2theta.deta2(lambda, .llambda, earg = .elambda)
- wz[,iam(1,1,M)] = wz[,iam(1,1,M)] - dl.dlambda * d2lambdadeta2
- wz[,iam(2,2,M)] = wz[,iam(2,2,M)] - dl.dtheta * d2THETA.deta2
- wz = c(w) * wz
-
- if (intercept.only) {
- sumw = sum(w)
- for(ii in 1:ncol(wz))
- wz[,ii] = sum(wz[,ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- }
- wz
- }), list( .ltheta = ltheta, .llambda = llambda,
- .use.approx = use.approx,
- .etheta = etheta, .elambda = elambda ))))
+ wz
+ }), list( .ltheta = ltheta, .llambda = llambda,
+ .use.approx = use.approx,
+ .etheta = etheta, .elambda = elambda ))))
}
@@ -5083,8 +5770,8 @@ rsimplex = function(n, mu = 0.5, dispersion = 1) {
-dlgamma = function(x, location = 0, scale = 1, k = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dlgamma <- function(x, location = 0, scale = 1, k = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -5101,7 +5788,7 @@ dlgamma = function(x, location = 0, scale = 1, k = 1, log = FALSE) {
}
-plgamma = function(q, location = 0, scale = 1, k = 1) {
+plgamma <- function(q, location = 0, scale = 1, k = 1) {
zedd = (q - location) / scale
ans = pgamma(exp(zedd), k)
@@ -5110,7 +5797,7 @@ plgamma = function(q, location = 0, scale = 1, k = 1) {
}
-qlgamma = function(p, location = 0, scale = 1, k = 1) {
+qlgamma <- function(p, location = 0, scale = 1, k = 1) {
if (!is.Numeric(scale, positive = TRUE))
stop("bad input for argument 'scale'")
@@ -5120,7 +5807,7 @@ qlgamma = function(p, location = 0, scale = 1, k = 1) {
}
-rlgamma = function(n, location = 0, scale = 1, k = 1) {
+rlgamma <- function(n, location = 0, scale = 1, k = 1) {
ans = location + scale * log(rgamma(n, k))
ans[scale < 0] = NaN
ans
@@ -5128,59 +5815,68 @@ rlgamma = function(n, location = 0, scale = 1, k = 1) {
- lgammaff = function(link = "loge", earg = list(), init.k = NULL)
+ lgammaff <- function(link = "loge", init.k = NULL)
{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Log-gamma distribution f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
- "Link: ",
- namesof("k", link, earg = earg), "\n", "\n",
- "Mean: digamma(k)", "\n"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("k", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- k.init = if (length( .init.k))
- rep( .init.k, length.out = length(y)) else {
- medy = median(y)
- if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
- }
- etastart = theta2eta(k.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg, .init.k=init.k ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- k = eta2theta(eta, .link, earg = .earg )
- digamma(k)
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(k= .link )
- misc$earg = list(k= .earg )
- misc$expected = TRUE
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- kk = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dlgamma(x = y, location = 0, scale = 1, k=kk, log = TRUE))
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Log-gamma distribution ",
+ "f(y) = exp(ky - e^y)/gamma(k)), k>0\n\n",
+ "Link: ",
+ namesof("k", link, earg = earg), "\n", "\n",
+ "Mean: digamma(k)", "\n"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ namesof("k", .link , earg = .earg , tag = FALSE)
+
+ if (!length(etastart)) {
+ k.init = if (length( .init.k))
+ rep( .init.k, length.out = length(y)) else {
+ medy = median(y)
+ if (medy < 2) 5 else if (medy < 4) 20 else exp(0.7 * medy)
}
+ etastart <- theta2eta(k.init, .link , earg = .earg )
+ }
+ }), list( .link = link, .earg = earg, .init.k = init.k ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ k = eta2theta(eta, .link , earg = .earg )
+ digamma(k)
+ }, list( .link = link, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link <- c(k = .link )
+ misc$earg <- list(k = .earg )
+ misc$expected <- TRUE
+ }), list( .link = link, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ kk = eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dlgamma(x = y, location = 0, scale = 1,
+ k = kk, log = TRUE))
+ }
}, list( .link = link, .earg = earg ))),
- vfamily = c("lgammaff"),
- deriv = eval(substitute(expression({
- k = eta2theta(eta, .link, earg = .earg )
- dl.dk = y - digamma(k)
- dk.deta = dtheta.deta(k, .link, earg = .earg )
- c(w) * dl.dk * dk.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- ed2l.dk2 = trigamma(k)
- wz = c(w) * dk.deta^2 * ed2l.dk2
- wz
- }), list( .link = link, .earg = earg ))))
+ vfamily = c("lgammaff"),
+ deriv = eval(substitute(expression({
+ kk = eta2theta(eta, .link , earg = .earg )
+ dl.dk = y - digamma(kk)
+ dk.deta = dtheta.deta(kk, .link , earg = .earg )
+ c(w) * dl.dk * dk.deta
+ }), list( .link = link, .earg = earg ))),
+ weight = eval(substitute(expression({
+ ned2l.dk2 = trigamma(kk)
+ wz = c(w) * dk.deta^2 * ned2l.dk2
+ wz
+ }), list( .link = link, .earg = earg ))))
}
@@ -5189,261 +5885,302 @@ rlgamma = function(n, location = 0, scale = 1, k = 1) {
- lgamma3ff = function(
+ lgamma3ff <- function(
llocation = "identity", lscale = "loge", lshape = "loge",
- elocation = list(), escale = list(), eshape = list(),
ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) &&
- !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
- new("vglmff",
- blurb = c("Log-gamma distribution",
- " f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
- "location=a, scale=b>0, shape=k>0\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n\n",
- "Mean: a + b*digamma(k)", "\n"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
- if (!length(etastart)) {
- k.init = if (length( .ishape))
- rep( .ishape, length.out = length(y)) else {
- rep(exp(median(y)), length.out = length(y))
- }
- scale.init = if (length( .iscale))
- rep( .iscale, length.out = length(y)) else {
- rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
- }
- loc.init = if (length( .iloc))
- rep( .iloc, length.out = length(y)) else {
- rep(median(y) - scale.init * digamma(k.init),
- length.out = length(y))
- }
- etastart =
- cbind(theta2eta(loc.init, .llocation, earg = .elocation),
- theta2eta(scale.init, .lscale, earg = .escale),
- theta2eta(k.init, .lshape, earg = .eshape))
- }
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocation, earg = .elocation) +
- eta2theta(eta[, 2], .lscale, earg = .escale) *
- digamma(eta2theta(eta[, 3], .lshape, earg = .eshape))
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale = .lscale, shape = .lshape)
- misc$earg = list(location= .elocation, scale = .escale, shape = .eshape)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .llocation, earg = .elocation)
- bb = eta2theta(eta[, 2], .lscale, earg = .escale)
- kk = eta2theta(eta[, 3], .lshape, earg = .eshape)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dlgamma(x = y, location=aa, scale=bb, k=kk, log = TRUE))
- }
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- vfamily = c("lgamma3ff"),
- deriv = eval(substitute(expression({
- a = eta2theta(eta[, 1], .llocation, earg = .elocation)
- b = eta2theta(eta[, 2], .lscale, earg = .escale)
- k = eta2theta(eta[, 3], .lshape, earg = .eshape)
- zedd = (y-a)/b
- dl.da = (exp(zedd) - k) / b
- dl.db = (zedd * (exp(zedd) - k) - 1) / b
- dl.dk = zedd - digamma(k)
- da.deta = dtheta.deta(a, .llocation, earg = .elocation)
- db.deta = dtheta.deta(b, .lscale, earg = .escale)
- dk.deta = dtheta.deta(k, .lshape, earg = .eshape)
- c(w) * cbind(dl.da * da.deta,
- dl.db * db.deta,
- dl.dk * dk.deta)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- weight = eval(substitute(expression({
- ed2l.da2 = k / b^2
- ed2l.db2 = (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
- ed2l.dk2 = trigamma(k)
- ed2l.dadb = (1 + k*digamma(k)) / b^2
- ed2l.dadk = 1 / b
- ed2l.dbdk = digamma(k) / b
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[,iam(1,1,M)] = ed2l.da2 * da.deta^2
- wz[,iam(2,2,M)] = ed2l.db2 * db.deta^2
- wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
- wz[,iam(1,2,M)] = ed2l.dadb * da.deta * db.deta
- wz[,iam(1,3,M)] = ed2l.dadk * da.deta * dk.deta
- wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
- wz = c(w) * wz
- wz
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))))
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+
+
+ new("vglmff",
+ blurb = c("Log-gamma distribution",
+ " f(y) = exp(k(y-a)/b - e^((y-a)/b))/(b*gamma(k)), ",
+ "location=a, scale=b>0, shape=k>0\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n\n",
+ "Mean: a + b*digamma(k)", "\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+
+
+ if (!length(etastart)) {
+ k.init = if (length( .ishape ))
+ rep( .ishape, length.out = length(y)) else {
+ rep(exp(median(y)), length.out = length(y))
+ }
+ scale.init = if (length( .iscale ))
+ rep( .iscale, length.out = length(y)) else {
+ rep(sqrt(var(y) / trigamma(k.init)), length.out = length(y))
+ }
+ loc.init = if (length( .ilocat ))
+ rep( .ilocat, length.out = length(y)) else {
+ rep(median(y) - scale.init * digamma(k.init),
+ length.out = length(y))
+ }
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(k.init, .lshape , earg = .eshape ))
+ }
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .ilocat = ilocat, .iscale = iscale, .ishape = ishape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat , earg = .elocat ) +
+ eta2theta(eta[, 2], .lscale , earg = .escale ) *
+ digamma(eta2theta(eta[, 3], .lshape , earg = .eshape ))
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ last = eval(substitute(expression({
+ misc$link <- c(location = .llocat , scale = .lscale ,
+ shape = .lshape)
+
+ misc$earg <- list(location = .elocat , scale = .escale ,
+ shape = .eshape )
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ bb = eta2theta(eta[, 2], .lscale , earg = .escale )
+ kk = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dlgamma(x = y, locat=aa, scale=bb, k=kk, log = TRUE))
+ }
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ vfamily = c("lgamma3ff"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b = eta2theta(eta[, 2], .lscale , earg = .escale )
+ k = eta2theta(eta[, 3], .lshape , earg = .eshape )
+
+ zedd = (y-a)/b
+ dl.da = (exp(zedd) - k) / b
+ dl.db = (zedd * (exp(zedd) - k) - 1) / b
+ dl.dk = zedd - digamma(k)
+
+ da.deta = dtheta.deta(a, .llocat , earg = .elocat )
+ db.deta = dtheta.deta(b, .lscale , earg = .escale )
+ dk.deta = dtheta.deta(k, .lshape , earg = .eshape )
+
+ c(w) * cbind(dl.da * da.deta,
+ dl.db * db.deta,
+ dl.dk * dk.deta)
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ ned2l.da2 = k / b^2
+ ned2l.db2 = (1 + k*(trigamma(k+1) + (digamma(k+1))^2)) / b^2
+ ned2l.dk2 = trigamma(k)
+ ned2l.dadb = (1 + k*digamma(k)) / b^2
+ ned2l.dadk = 1 / b
+ ned2l.dbdk = digamma(k) / b
+
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] = ned2l.da2 * da.deta^2
+ wz[, iam(2, 2, M)] = ned2l.db2 * db.deta^2
+ wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] = ned2l.dadb * da.deta * db.deta
+ wz[, iam(1, 3, M)] = ned2l.dadk * da.deta * dk.deta
+ wz[, iam(2, 3, M)] = ned2l.dbdk * db.deta * dk.deta
+ wz = c(w) * wz
+ wz
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))))
}
- prentice74 = function(
+ prentice74 <- function(
llocation = "identity", lscale = "loge", lshape = "identity",
- elocation = list(), escale = list(), eshape = list(),
ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
{
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) &&
- !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
+ ilocat <- ilocation
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Log-gamma distribution (Prentice, 1974)",
+ " f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
+ "w=(y-a)*q/b + digamma(1/q^2), location=a, scale=b>0, shape=q\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n", "\n",
+ "Mean: a", "\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("location", .llocat , earg = .elocat , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape, tag = FALSE))
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
- new("vglmff",
- blurb = c("Log-gamma distribution (Prentice, 1974)",
- " f(y) = |q| * exp(w/q^2 - e^w) / (b*gamma(1/q^2)) ,\n",
- "w=(y-a)*q/b + digamma(1/q^2), location=a, scale=b>0, shape=q\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape", lshape, earg = eshape), "\n", "\n",
- "Mean: a", "\n"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
- if (!length(etastart)) {
- sdy = sqrt(var(y))
- k.init = if (length( .ishape))
- rep( .ishape, length.out = length(y)) else {
- skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
- rep(-skewness, length.out = length(y))
- }
- scale.init = if (length( .iscale))
- rep( .iscale, length.out = length(y)) else {
- rep(sdy, length.out = length(y))
- }
- loc.init = if (length( .iloc)) rep( .iloc, length.out = length(y)) else {
- rep(median(y), length.out = length(y))
- }
- etastart = cbind(theta2eta(loc.init, .llocation, earg = .elocation),
- theta2eta(scale.init, .lscale, earg = .escale),
- theta2eta(k.init, .lshape, earg = .eshape))
+
+ if (!length(etastart)) {
+ sdy = sqrt(var(y))
+ k.init = if (length( .ishape ))
+ rep( .ishape, length.out = length(y)) else {
+ skewness = mean((y-mean(y))^3) / sdy^3 # <0 Left Skewed
+ rep(-skewness, length.out = length(y))
}
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape,
- .iloc=ilocation, .iscale = iscale, .ishape = ishape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocation, earg = .elocation)
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- last = eval(substitute(expression({
- misc$link = c(location= .llocation, scale = .lscale, shape = .lshape)
- misc$earg = list(location= .elocation, scale = .escale, shape = .eshape)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- a = eta2theta(eta[, 1], .llocation, earg = .elocation)
- b = eta2theta(eta[, 2], .lscale, earg = .escale)
- k = eta2theta(eta[, 3], .lshape, earg = .eshape)
- tmp55 = k^(-2)
- doubw = (y-a)*k/b + digamma(tmp55)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else
- sum(w*(log(abs(k)) -log(b) -lgamma(tmp55) + doubw*tmp55 -exp(doubw )))
- }, list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- vfamily = c("prentice74"),
- deriv = eval(substitute(expression({
- a = eta2theta(eta[, 1], .llocation, earg = .elocation)
- b = eta2theta(eta[, 2], .lscale, earg = .escale)
- k = eta2theta(eta[, 3], .lshape, earg = .eshape)
- tmp55 = k^(-2)
- mustar = digamma(tmp55)
- doubw = (y-a)*k/b + mustar
- sigmastar2 = trigamma(tmp55)
- dl.da = k*(exp(doubw) - tmp55) / b
- dl.db = ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
- dl.dk = 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
- ((doubw - mustar) / k - 2 * sigmastar2 / k^3)
- da.deta = dtheta.deta(a, .llocation, earg = .elocation)
- db.deta = dtheta.deta(b, .lscale, earg = .escale)
- dk.deta = dtheta.deta(k, .lshape, earg = .eshape)
- c(w) * cbind(dl.da * da.deta,
- dl.db * db.deta,
- dl.dk * dk.deta)
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))),
- weight = eval(substitute(expression({
- ed2l.da2 = 1 / b^2
- ed2l.db2 = (1 + sigmastar2*tmp55) / b^2
- ed2l.dk2 = tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
- (sigmastar2 - k^2)
- ed2l.dadb = k / b^2
- ed2l.dadk = (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
- ed2l.dbdk = (sigmastar2*tmp55 - 1) / (b*k)
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[,iam(1,1,M)] = ed2l.da2 * da.deta^2
- wz[,iam(2,2,M)] = ed2l.db2 * db.deta^2
- wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
- wz[,iam(1,2,M)] = ed2l.dadb * da.deta * db.deta
- wz[,iam(1,3,M)] = ed2l.dadk * da.deta * dk.deta
- wz[,iam(2,3,M)] = ed2l.dbdk * db.deta * dk.deta
- wz = c(w) * wz
- wz
- }), list( .llocation = llocation, .lscale = lscale, .lshape = lshape,
- .elocation = elocation, .escale = escale, .eshape = eshape ))))
+ scale.init = if (length( .iscale ))
+ rep( .iscale, length.out = length(y)) else {
+ rep(sdy, length.out = length(y))
+ }
+ loc.init = if (length( .iloc ))
+ rep( .iloc, length.out = length(y)) else {
+ rep(median(y), length.out = length(y))
+ }
+ etastart <-
+ cbind(theta2eta(loc.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ),
+ theta2eta(k.init, .lshape , earg = .eshape ))
+ }
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape,
+ .iloc = ilocat, .iscale = iscale, .ishape = ishape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta2theta(eta[, 1], .llocat , earg = .elocat )
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ last = eval(substitute(expression({
+ misc$link <- c(location = .llocat , scale = .lscale ,
+ shape = .lshape )
+ misc$earg <- list(location = .elocat , scale = .escale ,
+ shape = .eshape )
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ a = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b = eta2theta(eta[, 2], .lscale , earg = .escale )
+ k = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ tmp55 = k^(-2)
+ doubw = (y-a)*k/b + digamma(tmp55)
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else
+ sum(c(w)*(log(abs(k)) - log(b) - lgamma(tmp55) +
+ doubw * tmp55 - exp(doubw )))
+ }, list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ vfamily = c("prentice74"),
+ deriv = eval(substitute(expression({
+ a = eta2theta(eta[, 1], .llocat , earg = .elocat )
+ b = eta2theta(eta[, 2], .lscale , earg = .escale )
+ k = eta2theta(eta[, 3], .lshape , earg = .eshape )
+
+ tmp55 = k^(-2)
+ mustar = digamma(tmp55)
+ doubw = (y-a)*k/b + mustar
+ sigmastar2 = trigamma(tmp55)
+
+ dl.da = k*(exp(doubw) - tmp55) / b
+ dl.db = ((doubw - mustar) * (exp(doubw) - tmp55) - 1) / b
+ dl.dk = 1/k - 2 * (doubw - mustar) / k^3 - (exp(doubw) - tmp55) *
+ ((doubw - mustar) / k - 2 * sigmastar2 / k^3)
+
+ da.deta = dtheta.deta(a, .llocat , earg = .elocat )
+ db.deta = dtheta.deta(b, .lscale , earg = .escale )
+ dk.deta = dtheta.deta(k, .lshape , earg = .eshape )
+
+ c(w) * cbind(dl.da * da.deta,
+ dl.db * db.deta,
+ dl.dk * dk.deta)
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))),
+ weight = eval(substitute(expression({
+ ned2l.da2 = 1 / b^2
+ ned2l.db2 = (1 + sigmastar2*tmp55) / b^2
+ ned2l.dk2 = tmp55 - 3*sigmastar2*tmp55^2 + 4*sigmastar2*tmp55^4 *
+ (sigmastar2 - k^2)
+ ned2l.dadb = k / b^2
+ ned2l.dadk = (2*(sigmastar2*tmp55^2 - tmp55) - 1) / b
+ ned2l.dbdk = (sigmastar2*tmp55 - 1) / (b*k)
+
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] = ned2l.da2 * da.deta^2
+ wz[, iam(2, 2, M)] = ned2l.db2 * db.deta^2
+ wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] = ned2l.dadb * da.deta * db.deta
+ wz[, iam(1, 3, M)] = ned2l.dadk * da.deta * dk.deta
+ wz[, iam(2, 3, M)] = ned2l.dbdk * db.deta * dk.deta
+ wz = c(w) * wz
+ wz
+ }), list( .llocat = llocat, .lscale = lscale, .lshape = lshape,
+ .elocat = elocat, .escale = escale, .eshape = eshape))))
}
-dgengamma = function(x, scale = 1, d = 1, k = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dgengamma <- function(x, scale = 1, d = 1, k = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -5477,7 +6214,7 @@ dgengamma = function(x, scale = 1, d = 1, k = 1, log = FALSE) {
-pgengamma = function(q, scale = 1, d = 1, k = 1) {
+pgengamma <- function(q, scale = 1, d = 1, k = 1) {
zedd = (q / scale)^d
ans = pgamma(zedd, k)
ans[scale < 0] = NaN
@@ -5486,7 +6223,7 @@ pgengamma = function(q, scale = 1, d = 1, k = 1) {
}
-qgengamma = function(p, scale = 1, d = 1, k = 1) {
+qgengamma <- function(p, scale = 1, d = 1, k = 1) {
ans = scale * qgamma(p, k)^(1/d)
ans[scale < 0] = NaN
ans[d <= 0] = NaN
@@ -5494,7 +6231,7 @@ qgengamma = function(p, scale = 1, d = 1, k = 1) {
}
-rgengamma = function(n, scale = 1, d = 1, k = 1) {
+rgengamma <- function(n, scale = 1, d = 1, k = 1) {
ans = scale * rgamma(n, k)^(1/d)
ans[scale < 0] = NaN
@@ -5503,134 +6240,157 @@ rgengamma = function(n, scale = 1, d = 1, k = 1) {
}
- gengamma = function(lscale = "loge", ld = "loge", lk = "loge",
- escale = list(), ed = list(), ek = list(),
- iscale = NULL, id = NULL, ik = NULL, zero = NULL)
+ gengamma <- function(lscale = "loge", ld = "loge", lk = "loge",
+ iscale = NULL, id = NULL, ik = NULL, zero = NULL)
{
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(ld) != "character" && mode(ld) != "name")
- ld = as.character(substitute(ld))
- if (mode(lk) != "character" && mode(lk) != "name")
- lk = as.character(substitute(lk))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) &&
- !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (!is.list(escale)) escale = list()
- if (!is.list(ed)) ed = list()
- if (!is.list(ek)) ek = list()
+ ld <- as.list(substitute(ld))
+ ed <- link2list(ld)
+ ld <- attr(ed, "function.name")
- new("vglmff",
- blurb = c("Generalized gamma distribution",
- " f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
- "scale=b>0, d>0, k>0, y>0\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("d", ld, earg = ed), ", ",
- namesof("k", lk, earg = ek), "\n", "\n",
- "Mean: b*k", "\n"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0)) stop("response must be have positive values only")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("d", .ld, earg = .ed, tag = FALSE),
- namesof("k", .lk, earg = .ek, tag = FALSE))
- if (!length(etastart)) {
- b.init = if (length( .iscale))
- rep( .iscale, length.out = length(y)) else {
- rep(mean(y^2) / mean(y), length.out = length(y))
- }
- k.init = if (length( .ik))
- rep( .ik, length.out = length(y)) else {
- rep(mean(y) / b.init, length.out = length(y))
- }
- d.init = if (length( .id))
- rep( .id, length.out = length(y)) else {
- rep(digamma(k.init) / mean(log(y/b.init)),
- length.out = length(y))
- }
- etastart = cbind(theta2eta(b.init, .lscale, earg = .escale),
- theta2eta(d.init, .ld, earg = .ed),
- theta2eta(k.init, .lk, earg = .ek))
- }
- }), list( .lscale = lscale, .ld = ld, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek,
- .iscale = iscale, .id=id, .ik=ik ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- b = eta2theta(eta[, 1], .lscale, earg = .escale)
- k = eta2theta(eta[, 3], .lk, earg = .ek)
- b * k
- }, list( .ld = ld, .lscale = lscale, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .lscale, d= .ld, k= .lk)
- misc$earg = list(scale = .escale, d= .ed, k= .ek)
- }), list( .lscale = lscale, .ld = ld, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- b = eta2theta(eta[, 1], .lscale, earg = .escale)
- d = eta2theta(eta[, 2], .ld, earg = .ed)
- k = eta2theta(eta[, 3], .lk, earg = .ek)
- if (residuals) stop("loglikelihood residuals not ",
- "implemented yet") else {
- sum(w * dgengamma(x = y, scale=b, d=d, k=k, log = TRUE))
- }
- }, list( .lscale = lscale, .ld = ld, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek ))),
- vfamily = c("gengamma"),
- deriv = eval(substitute(expression({
- b = eta2theta(eta[, 1], .lscale, earg = .escale)
- d = eta2theta(eta[, 2], .ld, earg = .ed)
- k = eta2theta(eta[, 3], .lk, earg = .ek)
- tmp22 = (y/b)^d
- tmp33 = log(y/b)
- dl.db = d * (tmp22 - k) / b
- dl.dd = 1/d + tmp33 * (k - tmp22)
- dl.dk = d * tmp33 - digamma(k)
- db.deta = dtheta.deta(b, .lscale, earg = .escale)
- dd.deta = dtheta.deta(d, .ld, earg = .ed)
- dk.deta = dtheta.deta(k, .lk, earg = .ek)
- c(w) * cbind(dl.db * db.deta,
- dl.dd * dd.deta,
- dl.dk * dk.deta)
- }), list( .lscale = lscale, .ld = ld, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek ))),
- weight = eval(substitute(expression({
- ed2l.db2 = k * (d/b)^2
- ed2l.dd2 = (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
- ed2l.dk2 = trigamma(k)
- ed2l.dbdd = -(1 + k*digamma(k)) / b
- ed2l.dbdk = d / b
- ed2l.dddk = -digamma(k) / d
- wz = matrix(as.numeric(NA), n, dimm(M))
- wz[,iam(1,1,M)] = ed2l.db2 * db.deta^2
- wz[,iam(2,2,M)] = ed2l.dd2 * dd.deta^2
- wz[,iam(3,3,M)] = ed2l.dk2 * dk.deta^2
- wz[,iam(1,2,M)] = ed2l.dbdd * db.deta * dd.deta
- wz[,iam(1,3,M)] = ed2l.dbdk * db.deta * dk.deta
- wz[,iam(2,3,M)] = ed2l.dddk * dd.deta * dk.deta
- wz = c(w) * wz
- wz
- }), list( .lscale = lscale, .ld = ld, .lk = lk,
- .escale = escale, .ed = ed, .ek = ek ))))
+ lk <- as.list(substitute(lk))
+ ek <- link2list(lk)
+ lk <- attr(ek, "function.name")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) &&
+ !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+
+
+
+ new("vglmff",
+ blurb = c("Generalized gamma distribution",
+ " f(y) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k),\n",
+ "scale=b>0, d>0, k>0, y>0\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("d", ld, earg = ed), ", ",
+ namesof("k", lk, earg = ek), "\n", "\n",
+ "Mean: b * gamma(k+1/d) / gamma(k)", "\n"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("d", .ld , earg = .ed , tag = FALSE),
+ namesof("k", .lk , earg = .ek , tag = FALSE))
+
+
+ if (!length(etastart)) {
+ b.init = if (length( .iscale ))
+ rep( .iscale, length.out = length(y)) else {
+ rep(mean(y^2) / mean(y), length.out = length(y))
+ }
+ k.init = if (length( .ik ))
+ rep( .ik , length.out = length(y)) else {
+ rep(mean(y) / b.init, length.out = length(y))
+ }
+ d.init = if (length( .id ))
+ rep( .id , length.out = length(y)) else {
+ rep(digamma(k.init) / mean(log(y / b.init)),
+ length.out = length(y))
+ }
+ etastart <-
+ cbind(theta2eta(b.init, .lscale , earg = .escale ),
+ theta2eta(d.init, .ld , earg = .ed ),
+ theta2eta(k.init, .lk , earg = .ek ))
+ }
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek,
+ .iscale = iscale, .id = id, .ik = ik ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ b = eta2theta(eta[, 1], .lscale , earg = .escale )
+ d = eta2theta(eta[, 2], .ld , earg = .ed )
+ k = eta2theta(eta[, 3], .lk , earg = .ek )
+ b * gamma(k + 1 / d) / gamma(k)
+ }, list( .lscale = lscale, .lk = lk, .ld = ld,
+ .escale = escale, .ek = ek, .ed = ed ))),
+ last = eval(substitute(expression({
+ misc$link <- c(scale = .lscale , d = .ld , k = .lk )
+ misc$earg <- list(scale = .escale , d = .ed , k = .ek )
+ misc$expected <- TRUE
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ b = eta2theta(eta[, 1], .lscale , earg = .escale )
+ d = eta2theta(eta[, 2], .ld , earg = .ed )
+ k = eta2theta(eta[, 3], .lk , earg = .ek )
+
+ if (residuals) stop("loglikelihood residuals not ",
+ "implemented yet") else {
+ sum(c(w) * dgengamma(x = y, scale = b, d = d, k = k, log = TRUE))
+ }
+ }, list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ vfamily = c("gengamma"),
+ deriv = eval(substitute(expression({
+ b = eta2theta(eta[, 1], .lscale , earg = .escale )
+ d = eta2theta(eta[, 2], .ld , earg = .ed )
+ k = eta2theta(eta[, 3], .lk , earg = .ek )
+
+ tmp22 = (y/b)^d
+ tmp33 = log(y/b)
+ dl.db = d * (tmp22 - k) / b
+ dl.dd = 1/d + tmp33 * (k - tmp22)
+ dl.dk = d * tmp33 - digamma(k)
+
+ db.deta = dtheta.deta(b, .lscale , earg = .escale )
+ dd.deta = dtheta.deta(d, .ld , earg = .ed )
+ dk.deta = dtheta.deta(k, .lk , earg = .ek )
+
+ c(w) * cbind(dl.db * db.deta,
+ dl.dd * dd.deta,
+ dl.dk * dk.deta)
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))),
+ weight = eval(substitute(expression({
+ ned2l.db2 = k * (d/b)^2
+ ned2l.dd2 = (1 + k * (trigamma(k+1) + (digamma(k+1))^2)) / d^2
+ ned2l.dk2 = trigamma(k)
+ ned2l.dbdd = -(1 + k*digamma(k)) / b
+ ned2l.dbdk = d / b
+ ned2l.dddk = -digamma(k) / d
+
+ wz = matrix(as.numeric(NA), n, dimm(M))
+ wz[, iam(1, 1, M)] = ned2l.db2 * db.deta^2
+ wz[, iam(2, 2, M)] = ned2l.dd2 * dd.deta^2
+ wz[, iam(3, 3, M)] = ned2l.dk2 * dk.deta^2
+ wz[, iam(1, 2, M)] = ned2l.dbdd * db.deta * dd.deta
+ wz[, iam(1, 3, M)] = ned2l.dbdk * db.deta * dk.deta
+ wz[, iam(2, 3, M)] = ned2l.dddk * dd.deta * dk.deta
+
+ wz = c(w) * wz
+ wz
+ }), list( .lscale = lscale, .ld = ld, .lk = lk,
+ .escale = escale, .ed = ed, .ek = ek ))))
}
-dlog = function(x, prob, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
- rm(log)
+
+
+dlog <- function(x, prob, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
+ rm(log)
if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
@@ -5657,7 +6417,7 @@ dlog = function(x, prob, log = FALSE) {
-plog = function(q, prob, log.p = FALSE) {
+plog <- function(q, prob, log.p = FALSE) {
if (!is.Numeric(q)) stop("bad input for argument 'q'")
if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
stop("bad input for argument 'prob'")
@@ -5700,39 +6460,9 @@ plog = function(q, prob, log.p = FALSE) {
- if (FALSE)
-plog = function(q, prob, log.p = FALSE) {
- if (!is.Numeric(q)) stop("bad input for argument 'q'")
- if (!is.Numeric(prob, positive = TRUE) || max(prob) >= 1)
- stop("bad input for argument 'prob'")
- N = max(length(q), length(prob))
- q = rep(q, length.out = N); prob = rep(prob, length.out = N);
- ans = q * 0 # Retains names(q)
- if (max(abs(prob-prob[1])) < 1.0e-08) {
- qstar = floor(q)
- temp = if (max(qstar) >= 1) dlog(x = 1:max(qstar),
- prob = prob[1]) else 0*qstar
- unq = unique(qstar)
- for(ii in unq) {
- index = qstar == ii
- ans[index] = if (ii >= 1) sum(temp[1:ii]) else 0
- }
- } else
- for(ii in 1:N) {
- qstar = floor(q[ii])
- ans[ii] = if (qstar >= 1)
- sum(dlog(x = 1:qstar, prob = prob[ii])) else 0
- }
- if (log.p) log(ans) else ans
-}
-
-
-
-
-
-rlog = function(n, prob, Smallno=1.0e-6) {
+rlog <- function(n, prob, Smallno = 1.0e-6) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -5751,7 +6481,7 @@ rlog = function(n, prob, Smallno=1.0e-6) {
ptr1 = 1; ptr2 = 0
a = -1 / log1p(-prob)
mean = a*prob/(1-prob) # E(Y)
- sigma = sqrt(a*prob*(1-a*prob)) / (1-prob) # sd(Y)
+ sigma = sqrt(a * prob * (1 - a * prob)) / (1 - prob) # sd(Y)
ymax = dlog(x = 1, prob)
while(ptr2 < use.n) {
Lower = 0.5 # A continuity correction is used = 1 - 0.5.
@@ -5778,70 +6508,129 @@ rlog = function(n, prob, Smallno=1.0e-6) {
- logff = function(link = "logit", earg = list(), init.c = NULL)
+ logff <- function(link = "logit", init.c = NULL, zero = NULL)
{
if (length(init.c) &&
(!is.Numeric(init.c, positive = TRUE) || max(init.c) >= 1))
stop("init.c must be in (0,1)")
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg))
- earg = list()
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
new("vglmff",
blurb = c("Logarithmic distribution f(y) = a * c^y / y, ",
- "y = 1,2,3,...,\n",
+ "y = 1, 2, 3,...,\n",
" 0 < c < 1, a = -1 / log(1-c) \n\n",
"Link: ", namesof("c", link, earg = earg), "\n", "\n",
"Mean: a * c / (1 - c)", "\n"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("c", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg , tag = FALSE)
- predictors.names = namesof("c", .link, earg = .earg, tag = FALSE)
if (!length(etastart)) {
- llfun = function(cc, y, w) {
- a = -1 / log1p(-cc)
- sum(w * (log(a) + y * log(cc) - log(y)))
+ logff.Loglikfun <- function(probval, y, x, w, extraargs) {
+ sum(c(w) * dlog(x = y, prob = probval, log = TRUE))
}
- c.init = if (length( .init.c )) .init.c else
- getInitVals(gvals = seq(0.05, 0.95, length.out = 9),
- llfun = llfun, y = y, w = w)
- c.init = rep(c.init, length = length(y))
- etastart = theta2eta(c.init, .link, earg = .earg )
+ Init.c <- matrix(if (length( .init.c )) .init.c else 0,
+ n, M, byrow = TRUE)
+
+ if (!length( .init.c ))
+ for(ilocal in 1:ncoly) {
+ prob.grid <- seq(0.05, 0.95, by = 0.05)
+ Init.c[, ilocal] <- getMaxMin(prob.grid,
+ objfun = logff.Loglikfun,
+ y = y[, ilocal], x = x,
+ w = w[, ilocal])
+
+ }
+ etastart <- theta2eta(Init.c, .link , earg = .earg )
}
}), list( .link = link, .earg = earg, .init.c = init.c ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- cc = eta2theta(eta, .link, earg = .earg )
- a = -1 / log1p(-cc)
- a * cc / (1-cc)
+ cc <- eta2theta(eta, .link , earg = .earg )
+ aa <- -1 / log1p(-cc)
+ aa * cc / (1 - cc)
}, list( .link = link, .earg = earg ))),
+
last = eval(substitute(expression({
- misc$link = c(c = .link )
- misc$earg = list(c = .earg )
- misc$expected = TRUE
+ Musual <- extra$Musual
+ misc$link <- c(rep( .link , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .earg
+ }
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
}), list( .link = link, .earg = earg ))),
+
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- cc = eta2theta(eta, .link, earg = .earg )
- a = -1 / log1p(-cc)
+ cc <- eta2theta(eta, .link , earg = .earg )
+ aa <- -1 / log1p(-cc)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dlog(x = y, prob = -expm1(-1/a), log = TRUE))
+ sum(c(w) * dlog(x = y, prob = -expm1(-1/aa), log = TRUE))
}
}, list( .link = link, .earg = earg ))),
vfamily = c("logff"),
deriv = eval(substitute(expression({
- cc = eta2theta(eta, .link, earg = .earg )
- a = -1 / log1p(-cc)
- dl.dc = 1 / ((1 - cc) * log1p(-cc)) + y / cc
- dc.deta = dtheta.deta(cc, .link, earg = .earg )
+ Musual <- 1
+ cc <- eta2theta(eta, .link , earg = .earg )
+ aa <- -1 / log1p(-cc)
+ dl.dc <- 1 / ((1 - cc) * log1p(-cc)) + y / cc
+ dc.deta <- dtheta.deta(cc, .link , earg = .earg )
c(w) * dl.dc * dc.deta
}), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- ed2l.dc2 = a * (1 - a * cc) / (cc * (1-cc)^2)
- wz = c(w) * dc.deta^2 * ed2l.dc2
+ ned2l.dc2 <- aa * (1 - aa * cc) / (cc * (1-cc)^2)
+ wz <- c(w) * dc.deta^2 * ned2l.dc2
wz
}), list( .link = link, .earg = earg ))))
}
@@ -5849,119 +6638,129 @@ rlog = function(n, prob, Smallno=1.0e-6) {
-
- levy = function(delta = NULL, link.gamma = "loge",
- earg = list(), idelta = NULL, igamma = NULL)
+ levy <- function(delta = NULL, link.gamma = "loge",
+ idelta = NULL, igamma = NULL)
{
- delta.known = is.Numeric(delta, allowable.length = 1)
- if (mode(link.gamma) != "character" && mode(link.gamma) != "name")
- link.gamma = as.character(substitute(link.gamma))
- if (!is.list(earg)) earg = list()
+ delta.known = is.Numeric(delta, allowable.length = 1)
- new("vglmff",
- blurb = c("Levy distribution f(y) = sqrt(gamma/(2*pi)) * ",
- "(y-delta)^(-3/2) * \n",
- " exp(-gamma / (2*(y-delta ))),\n",
- " delta < y, gamma > 0",
- if (delta.known) paste(", delta = ", delta, ",", sep = ""),
- "\n\n",
- if (delta.known) "Link: " else "Links: ",
- namesof("gamma", link.gamma, earg = earg),
- if (! delta.known)
- c(", ", namesof("delta", "identity", earg = list())),
- "\n\n",
- "Mean: NA",
- "\n"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("gamma", .link.gamma, earg = .earg, tag = FALSE),
- if ( .delta.known) NULL else
- namesof("delta", "identity", earg = list(), tag = FALSE))
+ link.gamma <- as.list(substitute(link.gamma))
+ earg <- link2list(link.gamma)
+ link.gamma <- attr(earg, "function.name")
- if (!length(etastart)) {
- delta.init = if ( .delta.known) {
- if (min(y,na.rm= TRUE) <= .delta)
- stop("delta must be < min(y)")
- .delta
- } else {
- if (length( .idelta)) .idelta else
- min(y,na.rm= TRUE) - 1.0e-4 *
- diff(range(y,na.rm= TRUE))
- }
- gamma.init = if (length( .igamma)) .igamma else
- median(y - delta.init) # = 1/median(1/(y-delta.init))
- gamma.init = rep(gamma.init, length = length(y))
- etastart = cbind(theta2eta(gamma.init, .link.gamma, earg = .earg ),
- if ( .delta.known) NULL else delta.init)
-
- }
- }), list( .link.gamma = link.gamma, .earg = earg,
- .delta.known = delta.known,
- .delta = delta,
- .idelta = idelta,
- .igamma = igamma ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
- NA * mygamma
- }, list( .link.gamma = link.gamma, .earg = earg,
- .delta.known=delta.known,
- .delta=delta ))),
- last = eval(substitute(expression({
- misc$link = if ( .delta.known) NULL else c(delta = "identity")
- misc$link = c(gamma = .link.gamma, misc$link)
- misc$earg = if ( .delta.known) list(gamma = .earg ) else
- list(gamma = .earg, delta = list())
- if ( .delta.known)
- misc$delta = .delta
- }), list( .link.gamma = link.gamma, .earg = earg,
- .delta.known=delta.known,
- .delta=delta ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w * 0.5 * (log(mygamma) -3*log(y-delta) - mygamma / (y-delta )))
- }, list( .link.gamma = link.gamma, .earg = earg,
- .delta.known=delta.known,
- .delta=delta ))),
- vfamily = c("levy"),
- deriv = eval(substitute(expression({
- eta = as.matrix(eta)
- mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
- delta = if ( .delta.known) .delta else eta[, 2]
- if (! .delta.known)
- dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
- dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
- dgamma.deta = dtheta.deta(mygamma, .link.gamma, earg = .earg )
- c(w) * cbind(dl.dgamma * dgamma.deta,
- if ( .delta.known) NULL else dl.ddelta)
- }), list( .link.gamma = link.gamma, .earg = earg,
- .delta.known=delta.known,
- .delta=delta ))),
- weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
- wz[,iam(1,1,M)] = 1 * dgamma.deta^2
- if (! .delta.known) {
- wz[,iam(1,2,M)] = 3 * dgamma.deta
- wz[,iam(2,2,M)] = 21
- }
- wz = c(w) * wz / (2 * mygamma^2)
- wz
- }), list( .link.gamma = link.gamma, .earg = earg,
- .delta.known=delta.known,
- .delta=delta ))))
+ new("vglmff",
+ blurb = c("Levy distribution f(y) = sqrt(gamma/(2*pi)) * ",
+ "(y-delta)^(-3/2) * \n",
+ " exp(-gamma / (2*(y-delta ))),\n",
+ " delta < y, gamma > 0",
+ if (delta.known) paste(", delta = ", delta, ",", sep = ""),
+ "\n\n",
+ if (delta.known) "Link: " else "Links: ",
+ namesof("gamma", link.gamma, earg = earg),
+ if (! delta.known)
+ c(", ", namesof("delta", "identity", earg = list())),
+ "\n\n",
+ "Mean: NA",
+ "\n"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ c(namesof("gamma", .link.gamma, earg = .earg , tag = FALSE),
+ if ( .delta.known) NULL else
+ namesof("delta", "identity", earg = list(), tag = FALSE))
+
+
+ if (!length(etastart)) {
+ delta.init = if ( .delta.known) {
+ if (min(y,na.rm = TRUE) <= .delta)
+ stop("delta must be < min(y)")
+ .delta
+ } else {
+ if (length( .idelta)) .idelta else
+ min(y,na.rm = TRUE) - 1.0e-4 *
+ diff(range(y,na.rm = TRUE))
+ }
+ gamma.init = if (length( .igamma)) .igamma else
+ median(y - delta.init) # = 1/median(1/(y-delta.init))
+ gamma.init = rep(gamma.init, length = length(y))
+ etastart <-
+ cbind(theta2eta(gamma.init, .link.gamma , earg = .earg ),
+ if ( .delta.known) NULL else delta.init)
+
+ }
+ }), list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta,
+ .idelta = idelta,
+ .igamma = igamma ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta = if ( .delta.known) .delta else eta[, 2]
+
+
+ NA * mygamma
+ }, list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta ))),
+ last = eval(substitute(expression({
+ misc$link = if ( .delta.known) NULL else c(delta = "identity")
+ misc$link = c(gamma = .link.gamma, misc$link)
+ misc$earg = if ( .delta.known) list(gamma = .earg ) else
+ list(gamma = .earg , delta = list())
+ if ( .delta.known)
+ misc$delta = .delta
+ }), list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta = if ( .delta.known) .delta else eta[, 2]
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(c(w) * 0.5 * (log(mygamma) -3 * log(y - delta) -
+ mygamma / (y - delta)))
+ }, list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta ))),
+ vfamily = c("levy"),
+ deriv = eval(substitute(expression({
+ eta = as.matrix(eta)
+ mygamma = eta2theta(eta[, 1], .link.gamma, earg = .earg )
+ delta = if ( .delta.known) .delta else eta[, 2]
+ if (! .delta.known)
+ dl.ddelta = (3 - mygamma / (y-delta)) / (2 * (y-delta))
+ dl.dgamma = 0.5 * (1 / mygamma - 1 / (y-delta))
+ dgamma.deta = dtheta.deta(mygamma, .link.gamma, earg = .earg )
+ c(w) * cbind(dl.dgamma * dgamma.deta,
+ if ( .delta.known) NULL else dl.ddelta)
+ }), list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta ))),
+ weight = eval(substitute(expression({
+ wz = matrix(as.numeric(NA), n, dimm(M)) # M = if (delta is known) 1 else 2
+ wz[, iam(1, 1, M)] = 1 * dgamma.deta^2
+ if (! .delta.known) {
+ wz[, iam(1, 2, M)] = 3 * dgamma.deta
+ wz[, iam(2, 2, M)] = 21
+ }
+ wz = c(w) * wz / (2 * mygamma^2)
+ wz
+ }), list( .link.gamma = link.gamma, .earg = earg,
+ .delta.known = delta.known,
+ .delta = delta ))))
}
@@ -5969,8 +6768,8 @@ rlog = function(n, prob, Smallno=1.0e-6) {
-dlino = function(x, shape1, shape2, lambda = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dlino <- function(x, shape1, shape2, lambda = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -5981,7 +6780,7 @@ dlino = function(x, shape1, shape2, lambda = 1, log = FALSE) {
}
-plino = function(q, shape1, shape2, lambda = 1) {
+plino <- function(q, shape1, shape2, lambda = 1) {
ans = pbeta(q = lambda * q / (1 - (1-lambda)*q),
shape1 = shape1, shape2 = shape2)
ans[lambda <= 0] = NaN
@@ -5989,7 +6788,7 @@ plino = function(q, shape1, shape2, lambda = 1) {
}
-qlino = function(p, shape1, shape2, lambda = 1) {
+qlino <- function(p, shape1, shape2, lambda = 1) {
Y = qbeta(p = p, shape1 = shape1, shape2 = shape2)
ans = Y / (lambda + (1-lambda)*Y)
ans[lambda <= 0] = NaN
@@ -5997,7 +6796,7 @@ qlino = function(p, shape1, shape2, lambda = 1) {
}
-rlino = function(n, shape1, shape2, lambda = 1) {
+rlino <- function(n, shape1, shape2, lambda = 1) {
Y = rbeta(n = n, shape1 = shape1, shape2 = shape2)
ans = Y / (lambda + (1 - lambda) * Y)
ans[lambda <= 0] = NaN
@@ -6006,129 +6805,154 @@ rlino = function(n, shape1, shape2, lambda = 1) {
- lino = function(lshape1 = "loge",
- lshape2 = "loge",
- llambda = "loge",
- eshape1 = list(), eshape2 = list(), elambda = list(),
- ishape1 = NULL, ishape2 = NULL, ilambda = 1, zero = NULL)
+ lino <- function(lshape1 = "loge",
+ lshape2 = "loge",
+ llambda = "loge",
+ ishape1 = NULL, ishape2 = NULL, ilambda = 1,
+ zero = NULL)
{
- if (mode(lshape1) != "character" && mode(lshape1) != "name")
- lshape1 = as.character(substitute(lshape1))
- if (mode(lshape2) != "character" && mode(lshape2) != "name")
- lshape2 = as.character(substitute(lshape2))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.Numeric(ilambda, positive = TRUE))
- stop("bad input for argument 'ilambda'")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+ if (!is.Numeric(ilambda, positive = TRUE))
+ stop("bad input for argument 'ilambda'")
- if (!is.list(eshape1)) eshape1 = list()
- if (!is.list(eshape2)) eshape2 = list()
- if (!is.list(elambda)) elambda = list()
- new("vglmff",
- blurb = c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
- "Links: ",
- namesof("shape1", lshape1, earg = eshape1), ", ",
- namesof("shape2", lshape2, earg = eshape2), ", ",
- namesof("lambda", llambda, earg = elambda), "\n",
- "Mean: something complicated"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("shape1", .lshape1, earg = .eshape1, tag = FALSE),
- namesof("shape2", .lshape2, earg = .eshape2, tag = FALSE),
- namesof("lambda", .llambda, earg = .elambda, tag = FALSE))
- if (min(y) <= 0 || max(y) >= 1)
- stop("values of the response must be between 0 and 1 (0,1)")
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (!length(etastart)) {
- lambda.init = rep(if (length( .ilambda )) .ilambda else 1,
- length = n)
- sh1.init = if (length( .ishape1 ))
- rep( .ishape1, length = n) else NULL
- sh2.init = if (length( .ishape2 ))
- rep( .ishape2, length = n) else NULL
+
+ lshape1 <- as.list(substitute(lshape1))
+ eshape1 <- link2list(lshape1)
+ lshape1 <- attr(eshape1, "function.name")
+
+ lshape2 <- as.list(substitute(lshape2))
+ eshape2 <- link2list(lshape2)
+ lshape2 <- attr(eshape2, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Generalized Beta distribution (Libby and Novick, 1982)\n\n",
+ "Links: ",
+ namesof("shape1", lshape1, earg = eshape1), ", ",
+ namesof("shape2", lshape2, earg = eshape2), ", ",
+ namesof("lambda", llambda, earg = elambda), "\n",
+ "Mean: something complicated"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (min(y) <= 0 || max(y) >= 1)
+ stop("values of the response must be between 0 and 1 (0,1)")
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <-
+ c(namesof("shape1", .lshape1 , earg = .eshape1 , tag = FALSE),
+ namesof("shape2", .lshape2 , earg = .eshape2 , tag = FALSE),
+ namesof("lambda", .llambda , earg = .elambda , tag = FALSE))
+
+
+
+
+ if (!length(etastart)) {
+ lambda.init = rep(if (length( .ilambda )) .ilambda else 1,
+ length = n)
+ sh1.init = if (length( .ishape1 ))
+ rep( .ishape1, length = n) else NULL
+ sh2.init = if (length( .ishape2 ))
+ rep( .ishape2, length = n) else NULL
txY.init = lambda.init * y / (1+lambda.init*y - y)
mean1 = mean(txY.init)
- mean2 = mean(1/txY.init)
- if (!is.Numeric(sh1.init))
- sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
- if (!is.Numeric(sh2.init))
- sh2.init = rep(sh1.init * (1-mean1) / mean1, length = n)
- etastart = cbind(theta2eta(sh1.init, .lshape1, earg = .eshape1),
- theta2eta(sh2.init, .lshape2, earg = .eshape2),
- theta2eta(lambda.init, .llambda, earg = .elambda))
- }
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
- .ishape1=ishape1, .ishape2=ishape2, .ilambda = ilambda ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
- rep(as.numeric(NA), length = nrow(eta))
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .lshape1, shape2 = .lshape2, lambda = .llambda)
- misc$earg = list(shape1 = .eshape1, shape2 = .eshape2, lambda = .elambda)
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dlino(y, shape1 = sh1, shape2 = sh2, lambda=lambda, log = TRUE))
+ mean2 = mean(1/txY.init)
+ if (!is.Numeric(sh1.init))
+ sh1.init = rep((mean2 - 1) / (mean2 - 1/mean1), length = n)
+ if (!is.Numeric(sh2.init))
+ sh2.init = rep(sh1.init * (1-mean1) / mean1, length = n)
+ etastart <-
+ cbind(theta2eta(sh1.init, .lshape1 , earg = .eshape1),
+ theta2eta(sh2.init, .lshape2 , earg = .eshape2),
+ theta2eta(lambda.init, .llambda , earg = .elambda ))
}
- }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
- vfamily = c("lino"),
- deriv = eval(substitute(expression({
- sh1 = eta2theta(eta[, 1], .lshape1, earg = .eshape1)
- sh2 = eta2theta(eta[, 2], .lshape2, earg = .eshape2)
- lambda = eta2theta(eta[, 3], .llambda, earg = .elambda)
- temp1 = log1p(-(1-lambda) * y)
- temp2 = digamma(sh1+sh2)
- dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
- dl.dsh2 = log1p(-y) - digamma(sh2) + temp2 - temp1
- dl.dlambda = sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
- dsh1.deta = dtheta.deta(sh1, .lshape1, earg = .eshape1)
- dsh2.deta = dtheta.deta(sh2, .lshape2, earg = .eshape2)
- dlambda.deta = dtheta.deta(lambda, .llambda, earg = .elambda)
- c(w) * cbind( dl.dsh1 * dsh1.deta,
- dl.dsh2 * dsh2.deta,
- dl.dlambda * dlambda.deta)
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
- weight = eval(substitute(expression({
- temp3 = trigamma(sh1+sh2)
- ed2l.dsh1 = trigamma(sh1) - temp3
- ed2l.dsh2 = trigamma(sh2) - temp3
- ed2l.dlambda2 = sh1 * sh2 / (lambda^2 * (sh1+sh2+1))
- ed2l.dsh1sh2 = -temp3
- ed2l.dsh1lambda = -sh2 / ((sh1+sh2)*lambda)
- ed2l.dsh2lambda = sh1 / ((sh1+sh2)*lambda)
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.dsh1 * dsh1.deta^2
- wz[,iam(2,2,M)] = ed2l.dsh2 * dsh2.deta^2
- wz[,iam(3,3,M)] = ed2l.dlambda2 * dlambda.deta^2
- wz[,iam(1,2,M)] = ed2l.dsh1sh2 * dsh1.deta * dsh2.deta
- wz[,iam(1,3,M)] = ed2l.dsh1lambda * dsh1.deta * dlambda.deta
- wz[,iam(2,3,M)] = ed2l.dsh2lambda * dsh2.deta * dlambda.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
- .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))))
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda,
+ .ishape1=ishape1, .ishape2=ishape2, .ilambda = ilambda ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+ rep(as.numeric(NA), length = nrow(eta))
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ last = eval(substitute(expression({
+ misc$link <- c(shape1 = .lshape1 , shape2 = .lshape2 ,
+ lambda = .llambda )
+ misc$earg <- list(shape1 = .eshape1 , shape2 = .eshape2 ,
+ lambda = .elambda )
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dlino(y, shape1 = sh1, shape2 = sh2,
+ lambda = lambda, log = TRUE))
+ }
+ }, list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ vfamily = c("lino"),
+ deriv = eval(substitute(expression({
+ sh1 = eta2theta(eta[, 1], .lshape1 , earg = .eshape1)
+ sh2 = eta2theta(eta[, 2], .lshape2 , earg = .eshape2)
+ lambda = eta2theta(eta[, 3], .llambda , earg = .elambda )
+
+ temp1 = log1p(-(1-lambda) * y)
+ temp2 = digamma(sh1+sh2)
+
+ dl.dsh1 = log(lambda) + log(y) - digamma(sh1) + temp2 - temp1
+ dl.dsh2 = log1p(-y) - digamma(sh2) + temp2 - temp1
+ dl.dlambda = sh1/lambda - (sh1+sh2) * y / (1 - (1-lambda) * y)
+
+ dsh1.deta = dtheta.deta(sh1, .lshape1 , earg = .eshape1)
+ dsh2.deta = dtheta.deta(sh2, .lshape2 , earg = .eshape2)
+ dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+
+ c(w) * cbind( dl.dsh1 * dsh1.deta,
+ dl.dsh2 * dsh2.deta,
+ dl.dlambda * dlambda.deta)
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))),
+ weight = eval(substitute(expression({
+ temp3 = trigamma(sh1+sh2)
+
+ ned2l.dsh1 = trigamma(sh1) - temp3
+ ned2l.dsh2 = trigamma(sh2) - temp3
+ ned2l.dlambda2 = sh1 * sh2 / (lambda^2 * (sh1+sh2+1))
+ ned2l.dsh1sh2 = -temp3
+ ned2l.dsh1lambda = -sh2 / ((sh1+sh2)*lambda)
+ ned2l.dsh2lambda = sh1 / ((sh1+sh2)*lambda)
+
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
+ wz[, iam(1, 1, M)] = ned2l.dsh1 * dsh1.deta^2
+ wz[, iam(2, 2, M)] = ned2l.dsh2 * dsh2.deta^2
+ wz[, iam(3, 3, M)] = ned2l.dlambda2 * dlambda.deta^2
+ wz[, iam(1, 2, M)] = ned2l.dsh1sh2 * dsh1.deta * dsh2.deta
+ wz[, iam(1, 3, M)] = ned2l.dsh1lambda * dsh1.deta * dlambda.deta
+ wz[, iam(2, 3, M)] = ned2l.dsh2lambda * dsh2.deta * dlambda.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1 = lshape1, .lshape2 = lshape2, .llambda = llambda,
+ .eshape1 = eshape1, .eshape2 = eshape2, .elambda = elambda ))))
}
@@ -6138,36 +6962,40 @@ rlino = function(n, shape1, shape2, lambda = 1) {
- genbetaII = function(lshape1.a = "loge",
- lscale = "loge",
- lshape2.p = "loge",
- lshape3.q = "loge",
- eshape1.a = list(), escale = list(),
- eshape2.p = list(), eshape3.q = list(),
- ishape1.a = NULL,
- iscale = NULL,
- ishape2.p = 1.0,
- ishape3.q = 1.0,
- zero = NULL)
+ genbetaII <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ lshape3.q = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
+ ishape3.q = 1.0,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
- lshape2.p = as.character(substitute(lshape2.p))
- if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
- lshape3.q = as.character(substitute(lshape3.q))
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape2.p)) eshape2.p = list()
- if (!is.list(eshape3.q)) eshape3.q = list()
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
new("vglmff",
blurb = c("Generalized Beta II distribution\n\n",
@@ -6180,16 +7008,21 @@ rlino = function(n, shape1, shape2, lambda = 1) {
"gamma(shape3.q - 1/shape1.a) / ",
"(gamma(shape2.p) * gamma(shape3.q))"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- predictors.names =
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
- if (!length( .ishape1.a) || !length( .iscale )) {
+ if (!length( .ishape1.a ) || !length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
ishape3.q = if (length( .ishape3.q)) .ishape3.q else 1
xvec = log( (1-qvec)^(-1/ ishape3.q ) - 1 )
@@ -6214,10 +7047,11 @@ rlino = function(n, shape1, shape2, lambda = 1) {
parg[outOfRange] = 1 / aa[outOfRange] + 1
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q,
@@ -6227,7 +7061,7 @@ rlino = function(n, shape1, shape2, lambda = 1) {
.ishape2.p = ishape2.p, .ishape3.q = ishape3.q ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
ans = Scale * exp(lgamma(parg + 1/aa) +
@@ -6244,9 +7078,9 @@ rlino = function(n, shape1, shape2, lambda = 1) {
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale,
+ misc$link <- c(shape1.a = .lshape1.a, scale = .lscale ,
shape2.p = .lshape2.p, shape3.q = .lshape3.q)
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale,
+ misc$earg <- list(shape1.a = .eshape1.a, scale = .escale ,
shape2.p = .eshape2.p, shape3.q = .eshape3.q)
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
@@ -6255,12 +7089,12 @@ rlino = function(n, shape1, shape2, lambda = 1) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * (log(aa) + (aa * parg - 1) * log(y) -
+ sum(c(w) * (log(aa) + (aa * parg - 1) * log(y) -
aa * parg * log(scale) +
- lbeta(parg, qq) - (parg + qq) * log1p((y/scale)^aa)))
}
@@ -6271,7 +7105,7 @@ rlino = function(n, shape1, shape2, lambda = 1) {
vfamily = c("genbetaII"),
deriv = eval(substitute(expression({
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 4], .lshape3.q, earg = .eshape3.q)
@@ -6288,7 +7122,7 @@ rlino = function(n, shape1, shape2, lambda = 1) {
dl.dq = temp3 - temp3b - temp4
da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
@@ -6319,17 +7153,17 @@ rlino = function(n, shape1, shape2, lambda = 1) {
ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
ed2l.dpq = -temp5
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==4 means 10=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
- wz[,iam(4,4,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
- wz[,iam(1,4,M)] = ed2l.daq * da.deta * dq.deta
- wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz[,iam(2,4,M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz[,iam(3,4,M)] = ed2l.dpq * dp.deta * dq.deta
+ wz = matrix(as.numeric(NA), n, dimm(M)) # M==4 means 10=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] = ed2l.dp * dp.deta^2
+ wz[, iam(4, 4, M)] = ed2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] = ed2l.dap * da.deta * dp.deta
+ wz[, iam(1, 4, M)] = ed2l.daq * da.deta * dq.deta
+ wz[, iam(2, 3, M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[, iam(2, 4, M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[, iam(3, 4, M)] = ed2l.dpq * dp.deta * dq.deta
wz = c(w) * wz
wz
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
@@ -6373,7 +7207,8 @@ qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
bad = (p < 0) | (p > 1)
ans = NA * p
- LLL = max(length(p), length(shape1.a), length(scale), length(shape3.q))
+ LLL = max(length(p), length(shape1.a), length(scale),
+ length(shape3.q))
if (length(p) != LLL)
p <- rep(p, length.out = LLL)
if (length(shape1.a) != LLL)
@@ -6391,12 +7226,15 @@ qsinmad <- function(p, shape1.a, scale = 1, shape3.q) {
ans
}
+
qlomax <- function(p, scale = 1, shape3.q)
qsinmad(p, shape1.a = 1, scale = scale, shape3.q)
+
qfisk <- function(p, shape1.a, scale = 1)
qsinmad(p, shape1.a, scale = scale, shape3.q = 1)
+
qparalogistic <- function(p, shape1.a, scale = 1)
qsinmad(p, shape1.a, scale = scale, shape1.a)
@@ -6404,7 +7242,8 @@ qparalogistic <- function(p, shape1.a, scale = 1)
qdagum <- function(p, shape1.a, scale = 1, shape2.p) {
- LLL = max(length(p), length(shape1.a), length(scale), length(shape2.p))
+ LLL = max(length(p), length(shape1.a), length(scale),
+ length(shape2.p))
if (length(p) != LLL)
p <- rep(p, length.out = LLL)
if (length(shape1.a) != LLL)
@@ -6439,7 +7278,8 @@ qinvparalogistic <- function(p, shape1.a, scale = 1)
psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
- LLL = max(length(q), length(shape1.a), length(scale), length(shape3.q))
+ LLL = max(length(q), length(shape1.a), length(scale),
+ length(shape3.q))
if (length(q) != LLL)
q <- rep(q, length.out = LLL)
if (length(shape1.a) != LLL)
@@ -6462,15 +7302,15 @@ psinmad <- function(q, shape1.a, scale = 1, shape3.q) {
}
-plomax = function(q, scale = 1, shape3.q)
+plomax <- function(q, scale = 1, shape3.q)
psinmad(q, shape1.a = 1, scale, shape3.q)
-pfisk = function(q, shape1.a, scale = 1)
+pfisk <- function(q, shape1.a, scale = 1)
psinmad(q, shape1.a, scale, shape3.q = 1)
-pparalogistic = function(q, shape1.a, scale = 1)
+pparalogistic <- function(q, shape1.a, scale = 1)
psinmad(q, shape1.a, scale, shape1.a)
@@ -6478,7 +7318,8 @@ pparalogistic = function(q, shape1.a, scale = 1)
pdagum <- function(q, shape1.a, scale = 1, shape2.p) {
- LLL = max(length(q), length(shape1.a), length(scale), length(shape2.p))
+ LLL = max(length(q), length(shape1.a), length(scale),
+ length(shape2.p))
if (length(q) != LLL)
q <- rep(q, length.out = LLL)
if (length(shape1.a) != LLL)
@@ -6514,7 +7355,7 @@ pinvparalogistic <- function(q, shape1.a, scale = 1)
dsinmad <- function(x, shape1.a, scale = 1, shape3.q, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -6549,11 +7390,12 @@ dparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ddagum <- function(x, shape1.a, scale = 1, shape2.p, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- LLL = max(length(x), length(shape1.a), length(scale), length(shape2.p))
+ LLL = max(length(x), length(shape1.a), length(scale),
+ length(shape2.p))
x = rep(x, length.out = LLL);
shape1.a = rep(shape1.a, length.out = LLL)
scale = rep(scale, length.out = LLL);
@@ -6580,30 +7422,34 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- sinmad = function(lshape1.a = "loge",
- lscale = "loge",
- lshape3.q = "loge",
- eshape1.a = list(), escale = list(), eshape3.q = list(),
- ishape1.a = NULL,
- iscale = NULL,
- ishape3.q = 1.0,
- zero = NULL)
+ sinmad <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape3.q = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape3.q = 1.0,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
- lshape3.q = as.character(substitute(lshape3.q))
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape3.q)) eshape3.q = list()
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
new("vglmff",
blurb = c("Singh-Maddala distribution\n\n",
@@ -6615,14 +7461,17 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
"gamma(shape3.q - 1/shape1.a) / ",
"gamma(shape3.q)"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
parg = 1
@@ -6648,9 +7497,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
qq[outOfRange] = 1 / aa[outOfRange] + 1
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.lshape3.q = lshape3.q,
@@ -6661,7 +7511,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
@@ -6680,9 +7530,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
last = eval(substitute(expression({
misc$link =
- c(shape1.a = .lshape1.a, scale = .lscale, shape3.q = .lshape3.q)
+ c(shape1.a = .lshape1.a, scale = .lscale , shape3.q = .lshape3.q)
misc$earg =
- list(shape1.a = .eshape1.a, scale = .escale, shape3.q = .eshape3.q)
+ list(shape1.a = .eshape1.a, scale = .escale , shape3.q = .eshape3.q)
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
@@ -6690,13 +7540,13 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.lshape3.q = lshape3.q ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
- qq = eta2theta(eta[, 3], .lshape3.q, earg = .earg )
+ qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dsinmad(x = y, shape1.a = aa, scale = scale,
+ sum(c(w) * dsinmad(x = y, shape1.a = aa, scale = scale,
shape3.q = qq, log = TRUE))
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
@@ -6706,7 +7556,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
vfamily = c("sinmad"),
deriv = eval(substitute(expression({
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
@@ -6720,7 +7570,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dq = digamma(parg + qq) - temp3b - log1p(temp2)
da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.da * da.deta,
@@ -6742,12 +7592,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.daq = -(parg * (temp3b -temp3a) -1) / (aa*(parg+qq))
ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz[,iam(1,3,M)] = ed2l.daq * da.deta * dq.deta
- wz[,iam(2,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] = ed2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] = ed2l.daq * da.deta * dq.deta
+ wz[, iam(2, 3, M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = c(w) * wz
wz
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
@@ -6757,58 +7607,62 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}
- dagum = function(lshape1.a = "loge",
- lscale = "loge",
- lshape2.p = "loge",
- eshape1.a = list(), escale = list(), eshape2.p = list(),
- ishape1.a = NULL,
- iscale = NULL,
- ishape2.p = 1.0,
- zero = NULL)
+ dagum <- function(lshape1.a = "loge",
+ lscale = "loge",
+ lshape2.p = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ ishape2.p = 1.0,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
- lshape2.p = as.character(substitute(lshape2.p))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape2.p)) eshape2.p = list()
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
- new("vglmff",
- blurb = c("Dagum distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
- "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
- "gamma(1 - 1/shape1.a) / ",
- "gamma(shape2.p)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
- predictors.names <-
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (!length( .ishape1.a) || !length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
- xvec = log( qvec^(-1/ ishape2.p ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
+
+
+ new("vglmff",
+ blurb = c("Dagum distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
+ "Mean: scale * gamma(shape2.p + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a) / ",
+ "gamma(shape2.p)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+
+ if (!length( .ishape1.a) || !length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
+ xvec = log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
if (!length(etastart)) {
parg = rep(if (length( .ishape2.p )) .ishape2.p else 1.0,
@@ -6828,19 +7682,20 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale),
- theta2eta(parg, .lshape2.p, earg = .eshape2.p))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale,
- .eshape2.p = eshape2.p,
- .ishape1.a = ishape1.a, .iscale = iscale,
- .ishape2.p = ishape2.p ))),
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(parg, .lshape2.p, earg = .eshape2.p))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p,
+ .ishape1.a = ishape1.a, .iscale = iscale,
+ .ishape2.p = ishape2.p ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = 1
@@ -6857,27 +7712,34 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.eshape2.p = eshape2.p,
.lshape2.p = lshape2.p ))),
last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale, p = .lshape2.p )
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale, p = .eshape2.p )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale, .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale, .eshape2.p = eshape2.p ))),
+ misc$link = c(shape1.a = .lshape1.a, scale = .lscale ,
+ p = .lshape2.p )
+
+ misc$earg = list(shape1.a = .eshape1.a, scale = .escale ,
+ p = .eshape2.p )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * ddagum(x = y, shape1.a = aa, scale = Scale,
+ sum(c(w) * ddagum(x = y, shape1.a = aa, scale = Scale,
shape2.p = parg, log = TRUE))
}
- }, list( .lshape1.a = lshape1.a, .lscale = lscale, .lshape2.p = lshape2.p,
- .eshape1.a = eshape1.a, .escale = escale, .eshape2.p = eshape2.p ))),
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .lshape2.p = lshape2.p,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .eshape2.p = eshape2.p ))),
vfamily = c("dagum"),
deriv = eval(substitute(expression({
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = eta2theta(eta[, 3], .lshape2.p, earg = .eshape2.p)
qq = 1
@@ -6891,7 +7753,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dp = aa * temp1 + digamma(parg + qq) - temp3a - log1p(temp2)
da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
c(w) * cbind( dl.da * da.deta,
@@ -6912,12 +7774,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.dap= -(qq * (temp3a -temp3b) -1) / (aa*(parg+qq))
ed2l.dscalep = aa * qq / (Scale * (parg + qq))
wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(3,3,M)] = ed2l.dp * dp.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz[,iam(1,3,M)] = ed2l.dap * da.deta * dp.deta
- wz[,iam(2,3,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(3, 3, M)] = ed2l.dp * dp.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 3, M)] = ed2l.dap * da.deta * dp.deta
+ wz[, iam(2, 3, M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = c(w) * wz
wz
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
@@ -6928,45 +7790,53 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- betaII = function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
- escale = list(), eshape2.p = list(), eshape3.q = list(),
- iscale = NULL, ishape2.p = 2, ishape3.q = 2,
- zero = NULL) {
+ betaII =
+ function(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
+ iscale = NULL, ishape2.p = 2, ishape3.q = 2,
+ zero = NULL) {
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
- lshape2.p = as.character(substitute(lshape2.p))
- if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
- lshape3.q = as.character(substitute(lshape3.q))
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape2.p)) eshape2.p = list()
- if (!is.list(eshape3.q)) eshape3.q = list()
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
- new("vglmff",
- blurb = c("Beta II distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale * gamma(shape2.p + 1) * ",
- "gamma(shape3.q - 1) / ",
- "(gamma(shape2.p) * gamma(shape3.q))"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Beta II distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale * gamma(shape2.p + 1) * ",
+ "gamma(shape3.q - 1) / ",
+ "(gamma(shape2.p) * gamma(shape3.q))"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -6993,8 +7863,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
qq[outOfRange] = 1 / aa + 1
- etastart =
- cbind(theta2eta(scale, .lscale, earg = .escale),
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
theta2eta(parg, .lshape2.p, earg = .eshape2.p),
theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
@@ -7007,7 +7877,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.ishape3.q = ishape3.q ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = 1
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
@@ -7024,8 +7894,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
.lshape2.p = lshape2.p, .lshape3.q = lshape3.q ))),
last = eval(substitute(expression({
- misc$link = c(scale = .lscale, shape2.p = .lshape2.p, shape3.q = .lshape3.q)
- misc$earg = list(scale = .escale, shape2.p = .eshape2.p, shape3.q = .eshape3.q)
+ misc$link <- c(scale = .lscale , shape2.p = .lshape2.p,
+ shape3.q = .lshape3.q)
+ misc$earg <- list(scale = .escale , shape2.p = .eshape2.p,
+ shape3.q = .eshape3.q)
}), list( .lscale = lscale,
.escale = escale,
.eshape2.p = eshape2.p, .eshape3.q = eshape3.q,
@@ -7033,12 +7905,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
+ sum(c(w) * (log(aa) + (aa*parg-1)*log(y) - aa*parg*log(scale) +
(-lbeta(parg, qq)) - (parg+qq)*log1p((y/scale)^aa)))
}
}, list( .lscale = lscale,
@@ -7048,7 +7920,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
vfamily = c("betaII"),
deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
qq = eta2theta(eta[, 3], .lshape3.q, earg = .eshape3.q)
@@ -7062,7 +7934,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + temp3 - temp3a - temp4
dl.dq = temp3 - temp3b - temp4
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.dscale * dscale.deta,
@@ -7081,12 +7953,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
ed2l.dpq = -temp5
wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(2,2,M)] = ed2l.dp * dp.deta^2
- wz[,iam(3,3,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
- wz[,iam(1,3,M)] = ed2l.dscaleq * dscale.deta * dq.deta
- wz[,iam(2,3,M)] = ed2l.dpq * dp.deta * dq.deta
+ wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dp * dp.deta^2
+ wz[, iam(3, 3, M)] = ed2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz[, iam(1, 3, M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz[, iam(2, 3, M)] = ed2l.dpq * dp.deta * dq.deta
wz = c(w) * wz
wz
}), list( .lscale = lscale,
@@ -7097,42 +7969,52 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- lomax = function(lscale = "loge",
- lshape3.q = "loge",
- escale = list(), eshape3.q = list(),
- iscale = NULL,
- ishape3.q = 2.0,
- zero = NULL)
+ lomax <- function(lscale = "loge",
+ lshape3.q = "loge",
+ iscale = NULL,
+ ishape3.q = 2.0,
+ zero = NULL)
{
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape3.q) != "character" && mode(lshape3.q) != "name")
- lshape3.q = as.character(substitute(lshape3.q))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape3.q)) eshape3.q = list()
- new("vglmff",
- blurb = c("Lomax distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
- "Mean: scale / (shape3.q - 1)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
- aa = parg = 1
+ lshape3.q <- as.list(substitute(lshape3.q))
+ eshape3.q <- link2list(lshape3.q)
+ lshape3.q <- attr(eshape3.q, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape3.q", lshape3.q, earg = eshape3.q), "\n",
+ "Mean: scale / (shape3.q - 1)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+ if (ncol(cbind(y)) != 1)
+ stop("response must be a vector or a one-column matrix")
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape3.q", .lshape3.q, earg = .eshape3.q, tag = FALSE))
+
+ aa = parg = 1
if (!length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -7155,8 +8037,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- etastart = cbind(theta2eta(scale, .lscale, earg = .escale),
- theta2eta(qq, .lshape3.q, earg = .eshape3.q))
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
+ theta2eta(qq, .lshape3.q, earg = .eshape3.q))
}
}), list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q,
@@ -7181,19 +8064,19 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}, list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q ))),
last = eval(substitute(expression({
- misc$link = c(scale = .lscale, shape3.q = .lshape3.q)
- misc$earg = list(scale = .escale, shape3.q = .eshape3.q)
+ misc$link = c(scale = .lscale , shape3.q = .lshape3.q)
+ misc$earg = list(scale = .escale , shape3.q = .eshape3.q)
}), list( .lscale = lscale, .lshape3.q = lshape3.q,
.escale = escale, .eshape3.q = eshape3.q ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = 1
qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dlomax(x = y, scale = scale,
+ sum(c(w) * dlomax(x = y, scale = scale,
shape3.q = qq, log = TRUE))
}
}, list( .lscale = lscale, .lshape3.q = lshape3.q,
@@ -7201,14 +8084,14 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
vfamily = c("lomax"),
deriv = eval(substitute(expression({
aa = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = 1
qq = eta2theta(eta[, 2], .lshape3.q, earg = .eshape3.q)
temp2 = (y/scale)^aa
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dq = digamma(parg + qq) - digamma(qq) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
dq.deta = dtheta.deta(qq, .lshape3.q, earg = .eshape3.q)
c(w) * cbind( dl.dscale * dscale.deta,
dl.dq * dq.deta )
@@ -7218,10 +8101,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dq = 1/qq^2
ed2l.dscaleq = -aa * parg / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(2,2,M)] = ed2l.dq * dq.deta^2
- wz[,iam(1,2,M)] = ed2l.dscaleq * dscale.deta * dq.deta
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dq * dq.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dscaleq * dscale.deta * dq.deta
wz = c(w) * wz
wz
}), list( .lscale = lscale, .lshape3.q = lshape3.q,
@@ -7230,75 +8113,84 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- fisk = function(lshape1.a = "loge",
- lscale = "loge",
- eshape1.a = list(), escale = list(),
- ishape1.a = NULL,
- iscale = NULL,
- zero = NULL)
+ fisk <- function(lshape1.a = "loge",
+ lscale = "loge",
+ ishape1.a = NULL,
+ iscale = NULL,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
- new("vglmff",
- blurb = c("Fisk distribution\n\n",
- "Links: ",
- namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: scale * gamma(1 + 1/shape1.a) * ",
- "gamma(1 - 1/shape1.a)"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- predictors.names =
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- qq = parg = 1
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- xvec = log( 1/qvec - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
- if (!length(etastart)) {
- aa = rep(if (length( .ishape1.a)) .ishape1.a else
- abs(-1 / fit0$coef[2]),
- length.out = n)
- scale = rep(if (length( .iscale )) .iscale else
- exp(fit0$coef[1]),
- length.out = n)
+
+ new("vglmff",
+ blurb = c("Fisk distribution\n\n",
+ "Links: ",
+ namesof("shape1.a", lshape1.a, earg = eshape1.a), ", ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: scale * gamma(1 + 1/shape1.a) * ",
+ "gamma(1 - 1/shape1.a)"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a , earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ qq = parg = 1
+
+ if (!length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ xvec = log( 1/qvec - 1 )
+ fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
+
+ if (!length(etastart)) {
+ aa = rep(if (length( .ishape1.a)) .ishape1.a else
+ abs(-1 / fit0$coef[2]),
+ length.out = n)
+ scale = rep(if (length( .iscale )) .iscale else
+ exp(fit0$coef[1]),
+ length.out = n)
parg = 1
qq = 1
outOfRange = (parg + 1/aa <= 0)
- parg[outOfRange] = 1 / aa[outOfRange] + 1
- outOfRange = (qq - 1/aa <= 0)
+ parg[outOfRange] = 1 / aa[outOfRange] + 1
+ outOfRange = (qq - 1/aa <= 0)
qq[outOfRange] = 1 / aa + 1
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale))
- }
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale,
- .ishape1.a = ishape1.a, .iscale = iscale ))),
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a , earg = .eshape1.a ),
+ theta2eta(scale, .lscale , earg = .escale ))
+ }
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale,
+ .ishape1.a = ishape1.a, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = 1
@@ -7310,103 +8202,113 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ans[Scale <= 0] = NA
ans
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
last = eval(substitute(expression({
- misc$link = c(shape1.a = .lshape1.a, scale = .lscale)
- misc$earg = list(shape1.a = .eshape1.a, scale = .escale)
+ misc$link = c(shape1.a = .lshape1.a , scale = .lscale )
+ misc$earg = list(shape1.a = .eshape1.a , scale = .escale )
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .earg )
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- parg = qq = 1
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
- }
- }, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
- vfamily = c("fisk"),
- deriv = eval(substitute(expression({
- aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- parg = qq = 1
+ .eshape1.a = eshape1.a, .escale = escale))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg = qq = 1
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dfisk(x = y, shape1.a = aa, scale = scale, log = TRUE))
+ }
+ }, list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
+ vfamily = c("fisk"),
+ deriv = eval(substitute(expression({
+ aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ parg = qq = 1
+
+ temp1 = log(y/scale)
+ temp2 = (y/scale)^aa
+ temp3a = digamma(parg)
+ temp3b = digamma(qq)
+
+ dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
+ dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- temp1 = log(y/scale)
- temp2 = (y/scale)^aa
- temp3a = digamma(parg)
- temp3b = digamma(qq)
+ da.deta = dtheta.deta(aa, .lshape1.a , earg = .eshape1.a )
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
- dl.da = 1/aa + parg * temp1 - (parg+qq) * temp1 / (1+1/temp2)
- dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
- da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
- c(w) * cbind( dl.da * da.deta,
- dl.dscale * dscale.deta )
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ c(w) * cbind( dl.da * da.deta,
+ dl.dscale * dscale.deta )
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))),
weight = eval(substitute(expression({
- ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
- (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
- (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
- ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
- ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
- (scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
- wz = c(w) * wz
- wz
- }), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))))
+ ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
+ (temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
+ (parg^2 + qq^2) / (parg*qq)^2)) / (aa^2 * (1+parg+qq))
+ ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
+ ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
+ (scale*(1 + parg+qq))
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = c(w) * wz
+ wz
+ }), list( .lshape1.a = lshape1.a, .lscale = lscale,
+ .eshape1.a = eshape1.a, .escale = escale))))
}
- invlomax = function(lscale = "loge",
- lshape2.p = "loge",
- escale = list(), eshape2.p = list(),
- iscale = NULL,
- ishape2.p = 1.0,
- zero = NULL)
+ invlomax <- function(lscale = "loge",
+ lshape2.p = "loge",
+ iscale = NULL,
+ ishape2.p = 1.0,
+ zero = NULL)
{
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape2.p) != "character" && mode(lshape2.p) != "name")
- lshape2.p = as.character(substitute(lshape2.p))
-
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape2.p)) eshape2.p = list()
- new("vglmff",
- blurb = c("Inverse Lomax distribution\n\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), ", ",
- namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
- "Mean: does not exist"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
- qq = aa = 1
+ lshape2.p <- as.list(substitute(lshape2.p))
+ eshape2.p <- link2list(lshape2.p)
+ lshape2.p <- attr(eshape2.p, "function.name")
- if (!length( .iscale )) {
- qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
- ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
- xvec = log( qvec^(-1/ ishape2.p ) - 1 )
- fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
- }
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+
+
+ new("vglmff",
+ blurb = c("Inverse Lomax distribution\n\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape2.p", lshape2.p, earg = eshape2.p), "\n",
+ "Mean: does not exist"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape2.p", .lshape2.p, earg = .eshape2.p, tag = FALSE))
+
+ qq = aa = 1
+
+ if (!length( .iscale )) {
+ qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
+ ishape2.p = if (length( .ishape2.p)) .ishape2.p else 1
+ xvec = log( qvec^(-1/ ishape2.p ) - 1 )
+ fit0 = lsfit(x = xvec, y = log(quantile(y, qvec )))
+ }
if (!length(etastart)) {
scale = rep(if (length( .iscale )) .iscale else
exp(fit0$coef[1]),
@@ -7417,15 +8319,15 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- etastart =
- cbind(theta2eta(scale, .lscale, earg = .escale),
+ etastart <-
+ cbind(theta2eta(scale, .lscale , earg = .escale ),
theta2eta(parg, .lshape2.p, earg = .eshape2.p))
}
}), list( .lscale = lscale, .lshape2.p = lshape2.p,
.escale = escale, .eshape2.p = eshape2.p,
.iscale = iscale, .ishape2.p = ishape2.p ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
NA * Scale
@@ -7434,8 +8336,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
.eshape2.p = eshape2.p,
.lshape2.p = lshape2.p ))),
last = eval(substitute(expression({
- misc$link = c(scale = .lscale, shape2.p = .lshape2.p )
- misc$earg = list(scale = .escale, shape2.p = .eshape2.p )
+ misc$link = c(scale = .lscale , shape2.p = .lshape2.p )
+ misc$earg = list(scale = .escale , shape2.p = .eshape2.p )
}), list( .lscale = lscale,
.escale = escale,
.eshape2.p = eshape2.p,
@@ -7443,12 +8345,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dinvlomax(x = y, scale = scale,
+ sum(c(w) * dinvlomax(x = y, scale = scale,
shape2.p = parg, log = TRUE))
}
}, list( .lscale = lscale, .lshape2.p = lshape2.p,
@@ -7456,7 +8358,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
vfamily = c("invlomax"),
deriv = eval(substitute(expression({
aa = qq = 1
- scale = eta2theta(eta[, 1], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 1], .lscale , earg = .escale )
parg = eta2theta(eta[, 2], .lshape2.p, earg = .eshape2.p)
temp1 = log(y/scale)
@@ -7465,7 +8367,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
dl.dp = aa * temp1 + digamma(parg + qq) - digamma(parg) - log1p(temp2)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
dp.deta = dtheta.deta(parg, .lshape2.p, earg = .eshape2.p)
c(w) * cbind( dl.dscale * dscale.deta,
@@ -7476,10 +8378,10 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dp = 1/parg^2
ed2l.dscalep = aa * qq / (scale*(parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(2,2,M)] = ed2l.dp * dp.deta^2
- wz[,iam(1,2,M)] = ed2l.dscalep * dscale.deta * dp.deta
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dp * dp.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dscalep * dscale.deta * dp.deta
wz = c(w) * wz
wz
}), list( .lscale = lscale, .lshape2.p = lshape2.p,
@@ -7487,25 +8389,28 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
}
- paralogistic = function(lshape1.a = "loge",
- lscale = "loge",
- eshape1.a = list(), escale = list(),
- ishape1.a = 2,
- iscale = NULL,
- zero = NULL)
+ paralogistic <- function(lshape1.a = "loge",
+ lscale = "loge",
+ ishape1.a = 2,
+ iscale = NULL,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
new("vglmff",
blurb = c("Paralogistic distribution\n\n",
@@ -7515,15 +8420,20 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
"Mean: scale * gamma(1 + 1/shape1.a) * ",
"gamma(shape1.a - 1/shape1.a) / gamma(shape1.a)"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- parg = 1
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+ parg = 1
if (!length( .ishape1.a) || !length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -7552,8 +8462,9 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
qq[outOfRange] = 2 # Need aa > 1, where aa == qq
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale))
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
@@ -7561,7 +8472,7 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
))),
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a , earg = .eshape1.a )
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = aa
@@ -7573,29 +8484,29 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ans[Scale <= 0] = NA
ans
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
last = eval(substitute(expression({
misc$link = c(shape1.a = .lshape1.a, scale = .lscale)
misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = aa
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparalogistic(x = y, shape1.a = aa,
+ sum(c(w) * dparalogistic(x = y, shape1.a = aa,
scale = scale, log = TRUE))
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
vfamily = c("paralogistic"),
deriv = eval(substitute(expression({
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = 1
qq = aa
@@ -7608,12 +8519,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta)
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
weight = eval(substitute(expression({
ed2l.da = (1 + parg+qq + parg * qq * (trigamma(parg) + trigamma(qq) +
(temp3b - temp3a + (parg-qq)/(parg*qq))^2 -
@@ -7621,34 +8532,35 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ed2l.dscale = aa^2 * parg * qq / (scale^2 * (1+parg+qq))
ed2l.dascale = (parg - qq - parg*qq*(temp3a -temp3b)) /
(scale*(1 + parg+qq))
- wz = matrix(as.numeric(NA), n, dimm(M)) #M==2 means 3=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz = matrix(as.numeric(NA), n, dimm(M)) #M == 2 means 3=dimm(M)
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
wz = c(w) * wz
wz
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))))
+ .eshape1.a = eshape1.a, .escale = escale))))
}
- invparalogistic = function(lshape1.a = "loge", lscale = "loge",
- eshape1.a = list(), escale = list(),
- ishape1.a = 2, iscale = NULL,
- zero = NULL)
+ invparalogistic <- function(lshape1.a = "loge", lscale = "loge",
+ ishape1.a = 2, iscale = NULL,
+ zero = NULL)
{
- if (mode(lshape1.a) != "character" && mode(lshape1.a) != "name")
- lshape1.a = as.character(substitute(lshape1.a))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
-
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(eshape1.a)) eshape1.a = list()
- if (!is.list(escale)) escale = list()
+
+ lshape1.a <- as.list(substitute(lshape1.a))
+ eshape1.a <- link2list(lshape1.a)
+ lshape1.a <- attr(eshape1.a, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
new("vglmff",
blurb = c("Inverse paralogistic distribution\n\n",
@@ -7658,15 +8570,17 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
"Mean: scale * gamma(shape1.a + 1/shape1.a) * ",
"gamma(1 - 1/shape1.a)/gamma(shape1.a)"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ c(namesof("shape1.a", .lshape1.a, earg = .eshape1.a , tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
if (!length( .ishape1.a) || !length( .iscale )) {
qvec = c( .25, .5, .75) # Arbitrary; could be made an argument
@@ -7696,15 +8610,16 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
- etastart = cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
- theta2eta(scale, .lscale, earg = .escale))
+ etastart <-
+ cbind(theta2eta(aa, .lshape1.a, earg = .eshape1.a),
+ theta2eta(scale, .lscale , earg = .escale ))
}
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
.eshape1.a = eshape1.a, .escale = escale,
.ishape1.a = ishape1.a, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = aa
qq = 1
@@ -7716,29 +8631,29 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
ans[Scale <= 0] = NA
ans
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
last = eval(substitute(expression({
misc$link = c(shape1.a = .lshape1.a, scale = .lscale )
misc$earg = list(shape1.a = .eshape1.a, scale = .escale )
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = aa
qq = 1
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dinvparalogistic(x = y, shape1.a = aa,
+ sum(c(w) * dinvparalogistic(x = y, shape1.a = aa,
scale = scale, log = TRUE))
}
}, list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
vfamily = c("invparalogistic"),
deriv = eval(substitute(expression({
aa = eta2theta(eta[, 1], .lshape1.a, earg = .eshape1.a)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
parg = aa
qq = 1
@@ -7751,12 +8666,12 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
dl.dscale = (aa/scale) * (-parg + (parg+qq) / (1+1/temp2))
da.deta = dtheta.deta(aa, .lshape1.a, earg = .eshape1.a)
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
c(w) * cbind( dl.da * da.deta,
dl.dscale * dscale.deta )
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))),
+ .eshape1.a = eshape1.a, .escale = escale))),
weight = eval(substitute(expression({
ed2l.da = (1 + parg + qq +
@@ -7768,13 +8683,13 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
parg*qq*(temp3a -temp3b)) / (scale*(1 + parg+qq))
wz = matrix(as.numeric(NA), n, dimm(M)) #M==3 means 6=dimm(M)
- wz[,iam(1,1,M)] = ed2l.da * da.deta^2
- wz[,iam(2,2,M)] = ed2l.dscale * dscale.deta^2
- wz[,iam(1,2,M)] = ed2l.dascale * da.deta * dscale.deta
+ wz[, iam(1, 1, M)] = ed2l.da * da.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dscale * dscale.deta^2
+ wz[, iam(1, 2, M)] = ed2l.dascale * da.deta * dscale.deta
wz = c(w) * wz
wz
}), list( .lshape1.a = lshape1.a, .lscale = lscale,
- .eshape1.a = eshape1.a, .escale = escale ))))
+ .eshape1.a = eshape1.a, .escale = escale))))
}
@@ -7788,9 +8703,8 @@ dinvparalogistic <- function(x, shape1.a, scale = 1, log = FALSE)
if (FALSE)
- genlognormal = function(link.sigma = "loge", link.r = "loge",
- esigma = list(), er = list(),
- init.sigma = 1, init.r = 1, zero = NULL)
+ genlognormal <- function(link.sigma = "loge", link.r = "loge",
+ init.sigma = 1, init.r = 1, zero = NULL)
{
warning("2/4/04; doesn't work, possibly because first derivs are ",
"not continuous (sign() is used). Certainly, the derivs wrt ",
@@ -7800,17 +8714,20 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
- if (mode(link.sigma) != "character" && mode(link.sigma) != "name")
- link.sigma = as.character(substitute(link.sigma))
- if (mode(link.r) != "character" && mode(link.r) != "name")
- link.r = as.character(substitute(link.r))
-
if (length(zero) &&
!is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
stop("bad input for argument 'zero'")
- if (!is.list(esigma)) esigma = list()
- if (!is.list(er)) er = list()
+
+
+ link.sigma <- as.list(substitute(link.sigma))
+ esigma <- link2list(link.sigma)
+ link.sigma <- attr(esigma, "function.name")
+
+ link.r <- as.list(substitute(link.r))
+ er <- link2list(link.r)
+ link.r <- attr(er, "function.name")
+
new("vglmff",
blurb = c("Three-parameter generalized lognormal distribution\n\n",
@@ -7819,12 +8736,12 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
namesof("sigma", link.sigma, earg = esigma, tag = TRUE), ", ",
namesof("r", link.r, earg = er, tag = TRUE)),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
stop("response must be a vector or a one-column matrix")
- predictors.names =
+ predictors.names <-
c(namesof("loc", "identity", earg = list(), tag = FALSE),
namesof("sigma", .link.sigma, earg = .esigma, tag = FALSE),
namesof("r", .link.r, earg = .er, tag = FALSE))
@@ -7840,9 +8757,10 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
sigma.init = rep(if (length( .init.sigma)) .init.sigma else
sigma.init, length.out = n)
r.init = if (length( .init.r)) .init.r else init.r
- etastart = cbind(mu=rep(log(median(y)), length.out = n),
- sigma=sigma.init,
- r = r.init)
+ etastart <-
+ cbind(mu = rep(log(median(y)), length.out = n),
+ sigma = sigma.init,
+ r = r.init)
}
}), list( .link.sigma = link.sigma, .link.r = link.r,
.init.sigma = init.sigma, .init.r = init.r ))),
@@ -7866,7 +8784,7 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
temp89 = (abs(log(y)-mymu)/sigma)^r
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
- sum(w * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
+ sum(c(w) * (-log(r^(1/r) * sigma) - lgamma(1+1/r) - temp89/r))
}, list( .link.sigma = link.sigma, .link.r = link.r ))),
vfamily = c("genlognormal3"),
deriv = eval(substitute(expression({
@@ -7900,10 +8818,10 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
ed2l.dr2 = (ss * trigamma(ss) + B^2 - 1) / r^3
ed2l.dsigmar = -B / (r * sigma)
- wz[,iam(1,1,M)] = ed2l.dmymu2 * dmymu.deta^2
- wz[,iam(2,2,M)] = ed2l.dsigma2 * dsigma.deta^2
- wz[,iam(3,3,M)] = ed2l.dr2 * dr.deta^2
- wz[,iam(2,3,M)] = ed2l.dsigmar * dsigma.deta * dr.deta
+ wz[, iam(1, 1, M)] = ed2l.dmymu2 * dmymu.deta^2
+ wz[, iam(2, 2, M)] = ed2l.dsigma2 * dsigma.deta^2
+ wz[, iam(3, 3, M)] = ed2l.dr2 * dr.deta^2
+ wz[, iam(2, 3, M)] = ed2l.dsigmar * dsigma.deta * dr.deta
wz = c(w) * wz
wz
}))
@@ -7912,11 +8830,12 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
- betaprime = function(link = "loge", earg = list(), i1=2, i2 = NULL, zero = NULL)
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ betaprime <- function(link = "loge", i1 = 2, i2 = NULL, zero = NULL) {
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
new("vglmff",
blurb = c("Beta-prime distribution\n",
@@ -7927,54 +8846,58 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
namesof("shape2", link, earg = earg), "\n",
"Mean: shape1/(shape2-1) provided shape2>1"),
constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (ncol(y <- as.matrix(y)) > 1)
- stop("betaprime cannot handle matrix responses yet")
- if (min(y) <= 0)
- stop("response must be positive")
- predictors.names =
- c(namesof("shape1", .link, earg = .earg, short = TRUE),
- namesof("shape2", .link, earg = .earg, short = TRUE))
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("shape1", .link , earg = .earg , short = TRUE),
+ namesof("shape2", .link , earg = .earg , short = TRUE))
if (is.numeric( .i1) && is.numeric( .i2)) {
vec = c( .i1, .i2)
- vec = c(theta2eta(vec[1], .link, earg = .earg ),
- theta2eta(vec[2], .link, earg = .earg ))
- etastart = matrix(vec, n, 2, byrow= TRUE)
+ vec = c(theta2eta(vec[1], .link , earg = .earg ),
+ theta2eta(vec[2], .link , earg = .earg ))
+ etastart <- matrix(vec, n, 2, byrow = TRUE)
}
if (!length(etastart)) {
init1 = if (length( .i1))
rep( .i1, length.out = n) else rep(1, length.out = n)
init2 = if (length( .i2))
rep( .i2, length.out = n) else 1 + init1 / (y + 0.1)
- etastart = matrix(theta2eta(c(init1, init2), .link, earg = .earg ),
- n, 2, byrow = TRUE)
+ etastart <-
+ matrix(theta2eta(c(init1, init2), .link , earg = .earg ),
+ n, 2, byrow = TRUE)
}
- }), list( .link = link, .earg = earg, .i1=i1, .i2=i2 ))),
+ }), list( .link = link, .earg = earg, .i1 = i1, .i2 = i2 ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta, .link, earg = .earg )
+ shapes = eta2theta(eta, .link , earg = .earg )
ifelse(shapes[, 2] > 1, shapes[, 1] / (shapes[, 2]-1), NA)
}, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(shape1 = .link, shape2 = .link)
- misc$earg = list(shape1 = .earg, shape2 = .earg )
+ misc$link = c(shape1 = .link , shape2 = .link)
+ misc$earg = list(shape1 = .earg , shape2 = .earg )
}), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = eta2theta(eta, .link, earg = .earg )
+ shapes = eta2theta(eta, .link , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w *((shapes[, 1]-1) * log(y) -
+ sum(c(w) *((shapes[, 1]-1) * log(y) -
lbeta(shapes[, 1], shapes[, 2]) -
(shapes[, 2]+shapes[, 1]) * log1p(y)))
}
}, list( .link = link, .earg = earg ))),
vfamily = "betaprime",
deriv = eval(substitute(expression({
- shapes = eta2theta(eta, .link, earg = .earg )
- dshapes.deta = dtheta.deta(shapes, .link, earg = .earg )
+ shapes = eta2theta(eta, .link , earg = .earg )
+ dshapes.deta = dtheta.deta(shapes, .link , earg = .earg )
dl.dshapes = cbind(log(y) - log1p(y) - digamma(shapes[, 1]) +
digamma(shapes[, 1]+shapes[, 2]),
- log1p(y) - digamma(shapes[, 2]) +
@@ -7988,9 +8911,9 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
d2l.dshape1shape2 = temp2
wz = matrix(as.numeric(NA), n, dimm(M)) #3=dimm(M)
- wz[,iam(1,1,M)] = d2l.dshape12 * dshapes.deta[, 1]^2
- wz[,iam(2,2,M)] = d2l.dshape22 * dshapes.deta[, 2]^2
- wz[,iam(1,2,M)] = d2l.dshape1shape2 *
+ wz[, iam(1, 1, M)] = d2l.dshape12 * dshapes.deta[, 1]^2
+ wz[, iam(2, 2, M)] = d2l.dshape22 * dshapes.deta[, 2]^2
+ wz[, iam(1, 2, M)] = d2l.dshape1shape2 *
dshapes.deta[, 1] * dshapes.deta[, 2]
-c(w) * wz
@@ -8002,103 +8925,175 @@ warning("2/4/04; doesn't work, possibly because first derivs are ",
-dmaxwell = function(x, a, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dmaxwell <- function(x, a, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
- L = max(length(x), length(a))
- x = rep(x, length.out = L); a = rep(a, length.out = L);
- logdensity = rep(log(0), length.out = L)
- xok = (x > 0)
- logdensity[xok] = 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
- 2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
+ L <- max(length(x), length(a))
+ x <- rep(x, length.out = L); a = rep(a, length.out = L);
+ logdensity <- rep(log(0), length.out = L)
+ xok <- (x > 0)
+ logdensity[xok] <- 0.5 * log(2/pi) + 1.5 * log(a[xok]) +
+ 2 * log(x[xok]) - 0.5 * a[xok] * x[xok]^2
+ logdensity[a <= 0] <- NaN
if (log.arg) logdensity else exp(logdensity)
}
-pmaxwell = function(q, a) {
- if (any(a <= 0))
- stop("argument 'a' must be positive")
- L = max(length(q), length(a))
- q = rep(q, length.out = L); a = rep(a, length.out = L);
- ifelse(q > 0, erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi), 0)
+pmaxwell <- function(q, a) {
+ L <- max(length(q), length(a))
+ q <- rep(q, length.out = L);
+ a <- rep(a, length.out = L);
+ ans <- ifelse(q > 0,
+ erf(q*sqrt(a/2)) - q*exp(-0.5*a*q^2) * sqrt(2*a/pi),
+ 0)
+ ans[a <= 0] <- NaN
+ ans
}
-rmaxwell = function(n, a) {
+rmaxwell <- function(n, a) {
sqrt(2 * rgamma(n = n, 1.5) / a)
}
-qmaxwell = function(p, a) {
+qmaxwell <- function(p, a) {
if (!is.Numeric(p, positive = TRUE) || any(p >= 1))
stop("bad input for argument 'p'")
- if (any(a <= 0)) stop("argument 'a' must be positive")
- N = max(length(p), length(a));
- p = rep(p, length.out = N);
- a = rep(a, length.out = N)
+ if (any(a <= 0))
+ stop("argument 'a' must be positive")
+
+ N <- max(length(p), length(a));
+ p <- rep(p, length.out = N);
+ a <- rep(a, length.out = N)
sqrt(2 * qgamma(p = p, 1.5) / a)
}
- maxwell = function(link = "loge", earg = list()) {
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.list(earg)) earg = list()
+ maxwell <- function(link = "loge", zero = NULL) {
- new("vglmff",
- blurb = c("Maxwell distribution f(y) = sqrt(2/pi) * a^(3/2) * y^2 *",
- " exp(-0.5*a*y^2), y>0, a>0\n",
- "Link: ", namesof("a", link, earg = earg), "\n", "\n",
- "Mean: sqrt(8 / (a * pi))"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("a", .link, earg = .earg, tag = FALSE)
- if (!length(etastart)) {
- a.init = rep(8 / (pi*(y+0.1)^2), length = length(y))
- etastart = theta2eta(a.init, .link, earg = .earg )
- }
- }), list( .link = link, .earg = earg ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- a = eta2theta(eta, .link, earg = .earg )
- sqrt(8 / (a * pi))
- }, list( .link = link, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(a = .link)
- misc$earg = list(a = .earg )
- }), list( .link = link, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- aa = eta2theta(eta, .link, earg = .earg )
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w * dmaxwell(x = y, a = aa, log = TRUE))
- }, list( .link = link, .earg = earg ))),
- vfamily = c("maxwell"),
- deriv = eval(substitute(expression({
- a = eta2theta(eta, .link, earg = .earg )
- dl.da = 1.5 / a - 0.5 * y^2
- da.deta = dtheta.deta(a, .link, earg = .earg )
- c(w) * dl.da * da.deta
- }), list( .link = link, .earg = earg ))),
- weight = eval(substitute(expression({
- ed2l.da2 = 1.5 / a^2
- wz = c(w) * da.deta^2 * ed2l.da2
- wz
- }), list( .link = link, .earg = earg ))))
+
+ link <- as.list(substitute(link)) # orig
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
+
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+ new("vglmff",
+ blurb = c("Maxwell distribution f(y;a) = sqrt(2/pi) * a^(3/2) * y^2 *",
+ " exp(-0.5*a*y^2), y>0, a>0\n",
+ "Link: ",
+ namesof("a", link, earg = earg),
+ "\n", "\n",
+ "Mean: sqrt(8 / (a * pi))"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("a", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .link , earg = .earg )
+
+
+ if (!length(etastart)) {
+ a.init <- 8 / (pi * (y + 0.1)^2)
+ etastart <- theta2eta(a.init, .link , earg = .earg )
+ }
+ }), list( .link = link,
+ .earg = earg ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ aa <- eta2theta(eta, .link , earg = .earg )
+ sqrt(8 / (aa * pi))
+ }, list( .link = link,
+ .earg = earg ))),
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ilocal in 1:ncoly) {
+ misc$earg[[ilocal]] <- .earg
+ }
+
+ misc$link <- rep( .link , length = ncoly)
+ names(misc$link) <- mynames1
+
+ misc$Musual <- Musual
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .link = link, .earg = earg ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ aa <- eta2theta(eta, .link , earg = .earg )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(c(w) * dmaxwell(x = y, a = aa, log = TRUE))
+ }, list( .link = link,
+ .earg = earg ))),
+ vfamily = c("maxwell"),
+ deriv = eval(substitute(expression({
+ aa <- eta2theta(eta, .link , earg = .earg )
+
+ dl.da <- 1.5 / aa - 0.5 * y^2
+
+ da.deta <- dtheta.deta(aa, .link , earg = .earg )
+
+ c(w) * dl.da * da.deta
+ }), list( .link = link,
+ .earg = earg ))),
+ weight = eval(substitute(expression({
+ ned2l.da2 <- 1.5 / aa^2
+ wz <- c(w) * ned2l.da2 * da.deta^2
+ wz
+ }), list( .link = link, .earg = earg ))))
}
-dnaka = function(x, shape, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+
+
+
+dnaka <- function(x, shape, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(shape), length(scale))
@@ -8115,7 +9110,7 @@ dnaka = function(x, shape, scale = 1, log = FALSE) {
}
-pnaka = function(q, shape, scale = 1) {
+pnaka <- function(q, shape, scale = 1) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
if (!is.Numeric(shape, positive = TRUE))
@@ -8130,7 +9125,7 @@ pnaka = function(q, shape, scale = 1) {
}
-qnaka = function(p, shape, scale = 1, ...) {
+qnaka <- function(p, shape, scale = 1, ...) {
if (!is.Numeric(p, positive = TRUE) || max(p) >= 1)
stop("bad input for argument 'p'")
if (!is.Numeric(shape, positive = TRUE))
@@ -8141,7 +9136,7 @@ qnaka = function(p, shape, scale = 1, ...) {
p = rep(p, length.out = L); shape = rep(shape, length.out = L);
scale = rep(scale, length.out = L);
ans = rep(0.0, length.out = L)
- myfun = function(x, shape, scale = 1, p)
+ myfun <- function(x, shape, scale = 1, p)
pnaka(q = x, shape = shape, scale = scale) - p
for(ii in 1:L) {
EY = sqrt(scale[ii]/shape[ii]) *
@@ -8158,7 +9153,7 @@ qnaka = function(p, shape, scale = 1, ...) {
}
-rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
+rnaka <- function(n, shape, scale = 1, Smallno = 1.0e-6) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -8169,9 +9164,10 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
stop("bad input for argument 'scale'")
if (!is.Numeric(shape, positive = TRUE, allowable.length = 1))
stop("bad input for argument 'shape'")
- if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) || Smallno > 0.01 ||
- Smallno < 2 * .Machine$double.eps)
- stop("bad input for argument 'Smallno'")
+ if (!is.Numeric(Smallno, positive = TRUE, allowable.length = 1) ||
+ Smallno > 0.01 ||
+ Smallno < 2 * .Machine$double.eps)
+ stop("bad input for argument 'Smallno'")
ans = rep(0.0, length.out = use.n)
ptr1 = 1; ptr2 = 0
@@ -8200,101 +9196,110 @@ rnaka = function(n, shape, scale = 1, Smallno=1.0e-6) {
- nakagami = function(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale = 1) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ nakagami <- function(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = 1) {
- if (!is.null(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("argument 'iscale' must be a positive number or NULL")
+ if (!is.null(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("argument 'iscale' must be a positive number or NULL")
- if (!is.list(eshape)) eshape = list()
- if (!is.list(escale)) escale = list()
- new("vglmff",
- blurb = c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
- " ",
- "y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape),\n",
- " ",
- "y>0, shape>0, scale>0\n",
- "Links: ",
- namesof("shape", lshape, earg = eshape), ", ",
- namesof("scale", lscale, earg = escale),
- "\n",
- "\n",
- "Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape", .lshape, earg = .eshape, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (!length(etastart)) {
- init2 = if (is.Numeric( .iscale, positive = TRUE))
- rep( .iscale, length.out = n) else
- rep(1, length.out = n)
- init1 = if (is.Numeric( .ishape, positive = TRUE))
- rep( .ishape, length.out = n) else
- rep(init2 / (y+1/8)^2, length.out = n)
- etastart = cbind(theta2eta(init1, .lshape, earg = .eshape),
- theta2eta(init2, .lscale, earg = .escale))
- }
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape,
- .ishape = ishape, .iscale = iscale ))),
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ new("vglmff",
+ blurb = c("Nakagami distribution f(y) = 2 * (shape/scale)^shape *\n",
+ " ",
+ "y^(2*shape-1) * exp(-shape*y^2/scale) / gamma(shape),\n",
+ " ",
+ "y>0, shape>0, scale>0\n",
+ "Links: ",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n",
+ "\n",
+ "Mean: sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("shape", .lshape , earg = .eshape, tag = FALSE),
+ namesof("scale", .lscale , earg = .escale , tag = FALSE))
+
+
+ if (!length(etastart)) {
+ init2 = if (is.Numeric( .iscale, positive = TRUE))
+ rep( .iscale, length.out = n) else
+ rep(1, length.out = n)
+ init1 = if (is.Numeric( .ishape, positive = TRUE))
+ rep( .ishape, length.out = n) else
+ rep(init2 / (y+1/8)^2, length.out = n)
+ etastart <-
+ cbind(theta2eta(init1, .lshape , earg = .eshape ),
+ theta2eta(init2, .lscale , earg = .escale ))
+ }
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape,
+ .ishape = ishape, .iscale = iscale ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
sqrt(scale/shape) * gamma(shape+0.5) / gamma(shape)
}, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c(shape = .lshape, scale = .lscale)
- misc$earg = list(shape = .eshape, scale = .escale)
+ misc$link = c(shape = .lshape , scale = .lscale)
+ misc$earg = list(shape = .eshape, scale = .escale )
misc$expected = TRUE
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
- sum(w * dnaka(x = y, shape = shape, scale = scale, log = TRUE))
+ sum(c(w) * dnaka(x = y, shape = shape, scale = scale, log = TRUE))
}, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
vfamily = c("nakagami"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ Scale = eta2theta(eta[, 2], .lscale , earg = .escale )
dl.dshape = 1 + log(shape/Scale) - digamma(shape) +
2 * log(y) - y^2 / Scale
dl.dscale = -shape/Scale + shape * (y/Scale)^2
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
c(w) * cbind(dl.dshape * dshape.deta,
dl.dscale * dscale.deta)
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
d2l.dshape2 = trigamma(shape) - 1/shape
d2l.dscale2 = shape / Scale^2
wz = matrix(as.numeric(NA), n, M) # diagonal
- wz[,iam(1,1,M)] = d2l.dshape2 * dshape.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
+ wz[, iam(1, 1, M)] = d2l.dshape2 * dshape.deta^2
+ wz[, iam(2, 2, M)] = d2l.dscale2 * dscale.deta^2
c(w) * wz
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))))
+ .escale = escale, .eshape = eshape))))
}
-drayleigh = function(x, scale = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+drayleigh <- function(x, scale = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
L = max(length(x), length(scale))
@@ -8307,7 +9312,7 @@ drayleigh = function(x, scale = 1, log = FALSE) {
}
-prayleigh = function(q, scale = 1) {
+prayleigh <- function(q, scale = 1) {
if (any(scale <= 0))
stop("argument 'scale' must be positive")
L = max(length(q), length(scale))
@@ -8316,7 +9321,7 @@ prayleigh = function(q, scale = 1) {
}
-qrayleigh = function(p, scale = 1) {
+qrayleigh <- function(p, scale = 1) {
if (any(p <= 0) || any(p >= 1))
stop("argument 'p' must be between 0 and 1")
ans = scale * sqrt(-2 * log1p(-p))
@@ -8325,88 +9330,161 @@ qrayleigh = function(p, scale = 1) {
}
-rrayleigh = function(n, scale = 1) {
- ans = scale * sqrt(-2 * log(runif(n)))
- ans[scale <= 0] = NaN
- ans
-}
+rrayleigh <- function(n, scale = 1) {
+ ans = scale * sqrt(-2 * log(runif(n)))
+ ans[scale <= 0] = NaN
+ ans
+}
+
+
+
+ rayleigh <- function(lscale = "loge",
+ nrfs = 1 / 3 + 0.01,
+ oim.mean = TRUE, zero = NULL) {
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+
+ if (!is.Numeric(nrfs, allowable.length = 1) ||
+ nrfs < 0 ||
+ nrfs > 1)
+ stop("bad input for 'nrfs'")
+
+ if (!is.logical(oim.mean) || length(oim.mean) != 1)
+ stop("bad input for argument 'oim.mean'")
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+
+
+ new("vglmff",
+ blurb = c("Rayleigh distribution\n\n",
+ "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n\n",
+ "Link: ",
+ namesof("scale", lscale, earg = escale), "\n\n",
+ "Mean: scale * sqrt(pi / 2)"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 1
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 1,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+
+ initialize = eval(substitute(expression({
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 1
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+ mynames1 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .lscale , earg = .escale , tag = FALSE)
+
+
+ if (!length(etastart)) {
+ Ymat <- matrix(colSums(y) / colSums(w), n, ncoly, byrow = TRUE)
+ b.init = (Ymat + 1/8) / sqrt(pi/2)
+ etastart <- theta2eta(b.init, .lscale , earg = .escale )
+ }
+ }), list( .lscale = lscale, .escale = escale))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ Scale = eta2theta(eta, .lscale , earg = .escale )
+ Scale * sqrt(pi / 2)
+ }, list( .lscale = lscale, .escale = escale))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <- c(rep( .lscale , length = ncoly))
+ names(misc$link) <- mynames1
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- mynames1
+ for(ii in 1:ncoly) {
+ misc$earg[[ii]] <- .escale
+ }
+
+ misc$Musual <- Musual
+ misc$multipleResponses <- TRUE
+ misc$nrfs <- .nrfs
+ }), list( .lscale = lscale,
+ .escale = escale, .nrfs = nrfs ))),
+
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ Scale <- eta2theta(eta, .lscale , earg = .escale )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * drayleigh(x = y, scale = Scale, log = TRUE))
+ }
+ }, list( .lscale = lscale, .escale = escale))),
+ vfamily = c("rayleigh"),
+ deriv = eval(substitute(expression({
+ Scale <- eta2theta(eta, .lscale , earg = .escale )
+ dl.dScale <- ((y/Scale)^2 - 2) / Scale
- rayleigh = function(lscale = "loge",
- escale = list(), nrfs = 1 / 3 + 0.01) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ dScale.deta <- dtheta.deta(Scale, .lscale , earg = .escale )
- if (!is.list(escale)) escale = list()
+ c(w) * dl.dScale * dScale.deta
+ }), list( .lscale = lscale, .escale = escale))),
- if (!is.Numeric(nrfs, allowable.length = 1) ||
- nrfs < 0 ||
- nrfs > 1)
- stop("bad input for 'nrfs'")
+ weight = eval(substitute(expression({
+ d2l.dScale2 <- (3 * (y/Scale)^2 - 2) / Scale^2
+ ned2l.dScale2 <- 4 / Scale^2
+ wz <- c(w) * dScale.deta^2 *
+ ((1 - .nrfs) * d2l.dScale2 + .nrfs * ned2l.dScale2)
- new("vglmff",
- blurb = c("Rayleigh distribution\n\n",
- "f(y) = y*exp(-0.5*(y/scale)^2)/scale^2, y>0, scale>0\n\n",
- "Link: ",
- namesof("scale", lscale, earg = escale), "\n\n",
- "Mean: scale * sqrt(pi / 2)"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- namesof("scale", .lscale, earg = .escale, tag = FALSE)
+ if (intercept.only && .oim.mean ) {
+ ave.oim <- weighted.mean(d2l.dScale2, w)
+ if (ave.oim > 0) {
+ wz <- c(w) * dScale.deta^2 * ave.oim
+ }
+ }
- if (!length(etastart)) {
- b.init = (y + 1/8) / sqrt(pi/2)
- etastart = theta2eta(b.init, .lscale, earg = .escale)
- }
- }), list( .lscale = lscale, .escale = escale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale)
- Scale * sqrt(pi/2)
- }, list( .lscale = lscale, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c(scale = .lscale)
- misc$earg = list(scale = .escale)
- misc$nrfs = .nrfs
- }), list( .lscale = lscale, .escale = escale, .nrfs = nrfs ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- Scale = eta2theta(eta, .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * drayleigh(x = y, scale = Scale, log = TRUE))
- }
- }, list( .lscale = lscale, .escale = escale ))),
- vfamily = c("rayleigh"),
- deriv = eval(substitute(expression({
- Scale = eta2theta(eta, .lscale, earg = .escale)
- dl.dScale = ((y/Scale)^2 - 2) / Scale
- dScale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- c(w) * dl.dScale * dScale.deta
- }), list( .lscale = lscale, .escale = escale ))),
- weight = eval(substitute(expression({
- d2l.dScale2 = (3 * (y/Scale)^2 - 2) / Scale^2
- ed2l.dScale2 = 4 / Scale^2
- wz = c(w) * dScale.deta^2 *
- ((1 - .nrfs) * d2l.dScale2 + .nrfs * ed2l.dScale2)
- wz
- }), list( .lscale = lscale, .escale = escale, .nrfs = nrfs ))))
+ wz
+ }), list( .lscale = lscale,
+ .escale = escale,
+ .nrfs = nrfs, .oim.mean = oim.mean ))))
}
-dparetoIV = function(x, location = 0, scale = 1, inequality = 1, shape = 1, log = FALSE) {
- if (!is.logical(log.arg <- log))
- stop("bad input for argument 'log'")
+dparetoIV <- function(x, location = 0, scale = 1, inequality = 1,
+ shape = 1, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
+ stop("bad input for argument 'log'")
rm(log)
- N = max(length(x), length(location), length(scale), length(inequality),
- length(shape))
+ N = max(length(x), length(location), length(scale),
+ length(inequality), length(shape))
x = rep(x, length.out = N);
location = rep(location, length.out = N)
scale = rep(scale, length.out = N);
@@ -8419,7 +9497,8 @@ dparetoIV = function(x, location = 0, scale = 1, inequality = 1, shape = 1, log
logdensity[xok] = log(shape[xok]) -
log(scale[xok]) - log(inequality[xok]) +
(1/inequality[xok]-1) * log(zedd[xok]) -
- (shape[xok]+1) * log1p(zedd[xok]^(1/inequality[xok]))
+ (shape[xok]+1) *
+ log1p(zedd[xok]^(1/inequality[xok]))
if (log.arg) logdensity else exp(logdensity)
}
@@ -8476,379 +9555,433 @@ rparetoIV =
}
-dparetoIII = function(x, location = 0, scale = 1, inequality = 1,
- log = FALSE)
+dparetoIII <- function(x, location = 0, scale = 1, inequality = 1,
+ log = FALSE)
dparetoIV(x = x, location = location, scale = scale,
inequality = inequality, shape = 1, log = log)
-pparetoIII = function(q, location = 0, scale = 1, inequality=1)
+pparetoIII <- function(q, location = 0, scale = 1, inequality = 1)
pparetoIV(q = q, location = location, scale = scale,
inequality = inequality, shape = 1)
-qparetoIII = function(p, location = 0, scale = 1, inequality=1)
+qparetoIII <- function(p, location = 0, scale = 1, inequality = 1)
qparetoIV(p = p, location = location, scale = scale,
inequality = inequality, shape = 1)
-rparetoIII = function(n, location = 0, scale = 1, inequality=1)
+rparetoIII <- function(n, location = 0, scale = 1, inequality = 1)
rparetoIV(n = n, location= location, scale = scale,
inequality = inequality, shape = 1)
-dparetoII = function(x, location = 0, scale = 1, shape = 1, log = FALSE)
+dparetoII <- function(x, location = 0, scale = 1, shape = 1, log = FALSE)
dparetoIV(x = x, location = location, scale = scale,
inequality = 1, shape = shape,
log = log)
-pparetoII = function(q, location = 0, scale = 1, shape = 1)
+pparetoII <- function(q, location = 0, scale = 1, shape = 1)
pparetoIV(q = q, location = location, scale = scale,
inequality = 1, shape = shape)
-qparetoII = function(p, location = 0, scale = 1, shape = 1)
+qparetoII <- function(p, location = 0, scale = 1, shape = 1)
qparetoIV(p = p, location = location, scale = scale,
inequality = 1, shape = shape)
-rparetoII = function(n, location = 0, scale = 1, shape = 1)
+rparetoII <- function(n, location = 0, scale = 1, shape = 1)
rparetoIV(n = n, location = location, scale = scale,
inequality = 1, shape = shape)
-dparetoI = function(x, scale = 1, shape = 1)
+dparetoI <- function(x, scale = 1, shape = 1)
dparetoIV(x = x, location = scale, scale = scale, inequality = 1,
shape = shape)
-pparetoI = function(q, scale = 1, shape = 1)
+pparetoI <- function(q, scale = 1, shape = 1)
pparetoIV(q = q, location = scale, scale = scale, inequality = 1,
shape = shape)
-qparetoI = function(p, scale = 1, shape = 1)
+qparetoI <- function(p, scale = 1, shape = 1)
qparetoIV(p = p, location = scale, scale = scale, inequality = 1,
shape = shape)
-rparetoI = function(n, scale = 1, shape = 1)
+rparetoI <- function(n, scale = 1, shape = 1)
rparetoIV(n = n, location = scale, scale = scale, inequality = 1,
shape = shape)
- paretoIV = function(location = 0,
- lscale = "loge",
- linequality = "loge",
- lshape = "loge",
- escale = list(), einequality = list(), eshape = list(),
- iscale = 1, iinequality = 1, ishape = NULL,
- imethod = 1) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(linequality) != "character" && mode(linequality) != "name")
- linequality = as.character(substitute(linequality))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
-
- if (!is.Numeric(location))
- stop("argument 'location' must be numeric")
- if (is.Numeric(iscale) && any(iscale <= 0))
- stop("argument 'iscale' must be positive")
- if (is.Numeric(iinequality) && any(iinequality <= 0))
- stop("argument 'iinequality' must be positive")
- if (is.Numeric(ishape) && any(ishape <= 0))
- stop("argument 'ishape' must be positive")
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE) ||
- imethod > 2)
- stop("bad input for argument 'imethod'")
-
- if (linequality == "nloge" && location != 0)
- warning("The Burr distribution has 'location = 0' and ",
- "'linequality = nloge'")
-
- if (!is.list(escale)) escale = list()
- if (!is.list(einequality)) einequality = list()
- if (!is.list(eshape)) eshape = list()
+ paretoIV <- function(location = 0,
+ lscale = "loge",
+ linequality = "loge",
+ lshape = "loge",
+ iscale = 1, iinequality = 1, ishape = NULL,
+ imethod = 1) {
+
+ if (!is.Numeric(location))
+ stop("argument 'location' must be numeric")
+ if (is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument 'iscale' must be positive")
+ if (is.Numeric(iinequality) && any(iinequality <= 0))
+ stop("argument 'iinequality' must be positive")
+ if (is.Numeric(ishape) && any(ishape <= 0))
+ stop("argument 'ishape' must be positive")
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE) ||
+ imethod > 2)
+ stop("bad input for argument 'imethod'")
- new("vglmff",
- blurb = c("Pareto(IV) distribution F(y)=1-[1+((y - ", location,
- ")/scale)^(1/inequality)]^(-shape),",
- "\n", " y > ",
- location, ", scale > 0, inequality > 0, shape > 0,\n",
- "Links: ", namesof("scale", lscale, earg = escale ), ", ",
- namesof("inequality", linequality, earg = einequality ), ", ",
- namesof("shape", lshape, earg = eshape ), "\n",
- "Mean: location + scale * NA"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("inequality", .linequality, earg = .einequality, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
- extra$location = location = .location
- if (any(y <= location))
- stop("the response must have values > than the 'location' argument")
- if (!length(etastart)) {
- inequality.init = if (length( .iinequality)) .iinequality else 1
- scale.init = if (length( .iscale)) .iscale else 1
- shape.init = if (length( .ishape)) .ishape else NULL
- if (!length(shape.init)) {
- zedd = (y - location) / scale.init
- if ( .imethod == 1) {
- A1 = weighted.mean(1/(1 + zedd^(1/inequality.init)), w = w)
- A2 = weighted.mean(1/(1 + zedd^(1/inequality.init))^2, w = w)
- } else {
- A1 = median(1/(1 + zedd^(1/inequality.init )))
- A2 = median(1/(1 + zedd^(1/inequality.init))^2)
- }
- shape.init = max(0.01, (2*A2-A1)/(A1-A2))
- }
- etastart=cbind(
- theta2eta(rep(scale.init, length.out = n),
- .lscale, earg = .escale),
- theta2eta(rep(inequality.init, length.out = n),
- .linequality, earg = .einequality),
- theta2eta(rep(shape.init, length.out = n),
- .lshape, earg = .eshape))
+ if (linequality == "nloge" && location != 0)
+ warning("The Burr distribution has 'location = 0' and ",
+ "'linequality = nloge'")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ linequ <- as.list(substitute(linequality))
+ einequ <- link2list(linequ)
+ linequ <- attr(einequ, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+ iinequ = iinequality
+
+
+
+ new("vglmff",
+ blurb = c("Pareto(IV) distribution F(y)=1-[1+((y - ", location,
+ ")/scale)^(1/inequality)]^(-shape),",
+ "\n",
+ " y > ",
+ location,
+ ", scale > 0, inequality > 0, shape > 0,\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("inequality", linequ, earg = einequ ),
+ ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
+ "Mean: location + scale * NA"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("inequality", .linequ ,
+ earg = .einequ , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape, tag = FALSE))
+
+
+
+ extra$location = location = .location
+ if (any(y <= location))
+ stop("the response must have values > than the 'location' argument")
+
+ if (!length(etastart)) {
+ inequ.init = if (length( .iinequ )) .iinequ else 1
+ scale.init = if (length( .iscale )) .iscale else 1
+ shape.init = if (length( .ishape )) .ishape else NULL
+
+ if (!length(shape.init)) {
+ zedd = (y - location) / scale.init
+ if ( .imethod == 1) {
+ A1 = weighted.mean(1/(1 + zedd^(1/inequ.init)), w = w)
+ A2 = weighted.mean(1/(1 + zedd^(1/inequ.init))^2, w = w)
+ } else {
+ A1 = median(1/(1 + zedd^(1/inequ.init )))
+ A2 = median(1/(1 + zedd^(1/inequ.init))^2)
}
- }), list( .location = location, .lscale = lscale,
- .linequality = linequality, .lshape = lshape, .imethod = imethod,
- .escale = escale, .einequality = einequality, .eshape = eshape,
- .iscale = iscale, .iinequality=iinequality, .ishape = ishape ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
- location + Scale * NA
- }, list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
- .escale = escale, .einequality = einequality, .eshape = eshape ))),
+ shape.init = max(0.01, (2*A2-A1)/(A1-A2))
+ }
+
+ etastart <- cbind(
+ theta2eta(rep(scale.init, length.out = n),
+ .lscale , earg = .escale ),
+ theta2eta(rep(inequ.init, length.out = n),
+ .linequ, earg = .einequ),
+ theta2eta(rep(shape.init, length.out = n),
+ .lshape , earg = .eshape ))
+ }
+ }), list( .location = location, .lscale = lscale,
+ .linequ = linequ, .lshape = lshape, .imethod = imethod,
+ .escale = escale, .einequ = einequ, .eshape = eshape,
+ .iscale = iscale, .iinequ = iinequ, .ishape = ishape ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
+ location + Scale * NA
+ }, list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
+ .escale = escale, .einequ = einequ, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale, "inequality" = .linequality,
+ misc$link = c("scale" = .lscale , "inequality" = .linequ,
"shape" = .lshape)
- misc$earg = list(scale = .escale, inequality= .einequality,
- shape = .eshape)
+ misc$earg = list(scale = .escale , inequality= .einequ,
+ shape = .eshape )
misc$location = extra$location # Use this for prediction
- }), list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
- .escale = escale, .einequality = einequality, .eshape = eshape ))),
+ }), list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ,
+ .lshape = lshape,
+ .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoIV(x = y, location = location, scale=Scale,
- inequality=inequality, shape = shape, log = TRUE))
+ sum(c(w) * dparetoIV(x = y, location = location, scale = Scale,
+ inequ = inequ, shape = shape,
+ log = TRUE))
}
- }, list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
- .escale = escale, .einequality = einequality, .eshape = eshape ))),
+ }, list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ,
+ .lshape = lshape,
+ .eshape = eshape))),
vfamily = c("paretoIV"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
- shape = eta2theta(eta[, 3], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
+ shape = eta2theta(eta[, 3], .lshape , earg = .eshape )
zedd = (y - location) / Scale
- temp100 = 1 + zedd^(1/inequality)
- dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
- dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
- inequality - 1) / inequality
+ temp100 = 1 + zedd^(1/inequ)
+ dl.dscale = (shape - (1+shape) / temp100) / (inequ * Scale)
+ dl.dinequ = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ inequ - 1) / inequ
dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- dinequality.deta = dtheta.deta(inequality, .linequality, earg = .einequality)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dinequ.deta = dtheta.deta(inequ, .linequ, earg = .einequ)
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.dscale * dscale.deta,
- dl.dinequality * dinequality.deta,
+ dl.dinequ * dinequ.deta,
dl.dshape * dshape.deta)
- }), list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
- .escale = escale, .einequality = einequality, .eshape = eshape ))),
+ }), list( .lscale = lscale, .linequ = linequ,
+ .lshape = lshape,
+ .escale = escale, .einequ = einequ,
+ .eshape = eshape))),
weight = eval(substitute(expression({
temp200 = digamma(shape) - digamma(1) - 1
- d2scale.deta2 = shape / ((inequality*Scale)^2 * (shape+2))
- d2inequality.deta2 = (shape * (temp200^2 + trigamma(shape) + trigamma(1)
- ) + 2*(temp200+1)) / (inequality^2 * (shape+2))
+ d2scale.deta2 = shape / ((inequ*Scale)^2 * (shape+2))
+ d2inequ.deta2 = (shape * (temp200^2 + trigamma(shape) + trigamma(1)
+ ) + 2*(temp200+1)) / (inequ^2 * (shape+2))
d2shape.deta2 = 1 / shape^2
- d2si.deta2 = (shape*(-temp200) -1) / (inequality^2 * Scale * (shape+2))
- d2ss.deta2 = -1 / ((inequality*Scale) * (shape+1))
- d2is.deta2 = temp200 / (inequality*(shape+1))
+ d2si.deta2 = (shape*(-temp200) -1) / (inequ^2 * Scale * (shape+2))
+ d2ss.deta2 = -1 / ((inequ*Scale) * (shape+1))
+ d2is.deta2 = temp200 / (inequ*(shape+1))
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
- wz[,iam(2,2,M)] = dinequality.deta^2 * d2inequality.deta2
- wz[,iam(3,3,M)] = dshape.deta^2 * d2shape.deta2
- wz[,iam(1,2,M)] = dscale.deta * dinequality.deta * d2si.deta2
- wz[,iam(1,3,M)] = dscale.deta * dshape.deta * d2ss.deta2
- wz[,iam(2,3,M)] = dinequality.deta * dshape.deta * d2is.deta2
+ wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] = dinequ.deta^2 * d2inequ.deta2
+ wz[, iam(3, 3, M)] = dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] = dscale.deta * dinequ.deta * d2si.deta2
+ wz[, iam(1, 3, M)] = dscale.deta * dshape.deta * d2ss.deta2
+ wz[, iam(2, 3, M)] = dinequ.deta * dshape.deta * d2is.deta2
c(w) * wz
- }), list( .lscale = lscale, .linequality = linequality, .lshape = lshape,
- .escale = escale, .einequality = einequality, .eshape = eshape ))))
+ }), list( .lscale = lscale, .linequ = linequ, .lshape = lshape,
+ .escale = escale, .einequ = einequ, .eshape = eshape))))
}
- paretoIII = function(location = 0,
- lscale = "loge",
- linequality = "loge",
- escale = list(), einequality = list(),
- iscale = NULL, iinequality = NULL) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(linequality) != "character" && mode(linequality) != "name")
- linequality = as.character(substitute(linequality))
-
- if (!is.Numeric(location))
- stop("argument 'location' must be numeric")
- if (is.Numeric(iscale) && any(iscale <= 0))
- stop("argument 'iscale' must be positive")
- if (is.Numeric(iinequality) && any(iinequality <= 0))
- stop("argument 'iinequality' must be positive")
-
- if (!is.list(escale)) escale = list()
- if (!is.list(einequality)) einequality = list()
+ paretoIII <- function(location = 0,
+ lscale = "loge",
+ linequality = "loge",
+ iscale = NULL, iinequality = NULL) {
- new("vglmff",
- blurb = c("Pareto(III) distribution F(y)=1-[1+((y - ", location,
- ")/scale)^(1/inequality)]^(-1),",
- "\n", " y > ",
- location, ", scale > 0, inequality > 0, \n",
- "Links: ",
- namesof("scale", lscale, earg = escale ), ", ",
- namesof("inequality", linequality, earg = einequality ), "\n",
- "Mean: location + scale * NA"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("the response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("inequality", .linequality, earg = .einequality, tag = FALSE))
- extra$location = location = .location
- if (any(y <= location))
- stop("the response must have values > than the 'location' argument")
- if (!length(etastart)) {
- inequality.init = if (length( .iinequality)) .iinequality else NULL
- scale.init = if (length( .iscale)) .iscale else NULL
- if (!length(inequality.init) || !length(scale.init)) {
+ if (!is.Numeric(location))
+ stop("argument 'location' must be numeric")
+ if (is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument 'iscale' must be positive")
+ if (is.Numeric(iinequality) && any(iinequality <= 0))
+ stop("argument 'iinequality' must be positive")
+
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ linequ <- as.list(substitute(linequality))
+ einequ <- link2list(linequ)
+ linequ <- attr(einequ, "function.name")
+
+
+ iinequ = iinequality
+
+
+
+ new("vglmff",
+ blurb = c("Pareto(III) distribution F(y)=1-[1+((y - ", location,
+ ")/scale)^(1/inequality)]^(-1),",
+ "\n", " y > ",
+ location, ", scale > 0, inequality > 0, \n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), ", ",
+ namesof("inequality", linequ, earg = einequ ), "\n",
+ "Mean: location + scale * NA"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("inequ", .linequ, earg = .einequ, tag = FALSE))
+ extra$location = location = .location
+
+ if (any(y <= location))
+ stop("the response must have values > than the 'location' argument")
+
+
+ if (!length(etastart)) {
+ inequ.init = if (length( .iinequ)) .iinequ else NULL
+ scale.init = if (length( .iscale )) .iscale else NULL
+ if (!length(inequ.init) || !length(scale.init)) {
probs = (1:4)/5
- ytemp = quantile(x=log(y-location), probs=probs)
- fittemp = lsfit(x=logit(probs), y = ytemp, intercept = TRUE)
- if (!length(inequality.init))
- inequality.init = max(fittemp$coef["X"], 0.01)
+ ytemp = quantile(x = log(y-location), probs = probs)
+ fittemp = lsfit(x = logit(probs), y = ytemp, intercept = TRUE)
+ if (!length(inequ.init))
+ inequ.init = max(fittemp$coef["X"], 0.01)
if (!length(scale.init))
scale.init = exp(fittemp$coef["Intercept"])
}
etastart=cbind(
theta2eta(rep(scale.init, length.out = n),
- .lscale, earg = .escale),
- theta2eta(rep(inequality.init, length.out = n),
- .linequality,
- earg = .einequality))
+ .lscale , earg = .escale ),
+ theta2eta(rep(inequ.init, length.out = n),
+ .linequ,
+ earg = .einequ))
}
- }), list( .location = location, .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality,
- .iscale = iscale, .iinequality=iinequality ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
- location + Scale * NA
- }, list( .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality ))),
- last = eval(substitute(expression({
- misc$link = c("scale" = .lscale, "inequality" = .linequality)
- misc$earg = list(scale = .escale, inequality= .einequality)
- misc$location = extra$location # Use this for prediction
- }), list( .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ }), list( .location = location, .lscale = lscale,
+ .linequ = linequ,
+ .escale = escale, .einequ = einequ,
+ .iscale = iscale, .iinequ = iinequ ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ location = extra$location
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
+ location + Scale * NA
+ }, list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ ))),
+ last = eval(substitute(expression({
+ misc$link = c("scale" = .lscale , "inequality" = .linequ)
+ misc$earg = list("scale" = .escale , "inequality" = .einequ)
+
+ misc$location = extra$location # Use this for prediction
+ }), list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoIII(x = y, location = location, scale=Scale,
- inequality=inequality, log = TRUE))
+ sum(c(w) * dparetoIII(x = y, location = location, scale=Scale,
+ inequ=inequ, log = TRUE))
}
- }, list( .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality ))),
+ }, list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ ))),
vfamily = c("paretoIII"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- inequality = eta2theta(eta[, 2], .linequality, earg = .einequality)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ inequ = eta2theta(eta[, 2], .linequ, earg = .einequ)
shape = 1
zedd = (y - location) / Scale
- temp100 = 1 + zedd^(1/inequality)
- dl.dscale = (shape - (1+shape) / temp100) / (inequality * Scale)
- dl.dinequality = ((log(zedd) * (shape - (1+shape)/temp100)) /
- inequality - 1) / inequality
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- dinequality.deta = dtheta.deta(inequality, .linequality, earg = .einequality)
+ temp100 = 1 + zedd^(1/inequ)
+ dl.dscale = (shape - (1+shape) / temp100) / (inequ * Scale)
+ dl.dinequ = ((log(zedd) * (shape - (1+shape)/temp100)) /
+ inequ - 1) / inequ
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dinequ.deta = dtheta.deta(inequ, .linequ, earg = .einequ)
c(w) * cbind(dl.dscale * dscale.deta,
- dl.dinequality * dinequality.deta)
- }), list( .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality ))),
+ dl.dinequ * dinequ.deta)
+ }), list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ ))),
weight = eval(substitute(expression({
- d2scale.deta2 = 1 / ((inequality*Scale)^2 * 3)
- d2inequality.deta2 = (1 + 2* trigamma(1)) / (inequality^2 * 3)
+ d2scale.deta2 = 1 / ((inequ*Scale)^2 * 3)
+ d2inequ.deta2 = (1 + 2* trigamma(1)) / (inequ^2 * 3)
wz = matrix(0, n, M) # It is diagonal
- wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
- wz[,iam(2,2,M)] = dinequality.deta^2 * d2inequality.deta2
+ wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] = dinequ.deta^2 * d2inequ.deta2
c(w) * wz
- }), list( .lscale = lscale, .linequality = linequality,
- .escale = escale, .einequality = einequality ))))
+ }), list( .lscale = lscale, .linequ = linequ,
+ .escale = escale, .einequ = einequ ))))
}
- paretoII = function(location = 0,
- lscale = "loge",
- lshape = "loge",
- escale = list(), eshape = list(),
- iscale = NULL, ishape = NULL) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
+ paretoII <- function(location = 0,
+ lscale = "loge",
+ lshape = "loge",
+ iscale = NULL, ishape = NULL) {
- if (!is.Numeric(location))
- stop("argument 'location' must be numeric")
- if (is.Numeric(iscale) && any(iscale <= 0))
- stop("argument 'iscale' must be positive")
- if (is.Numeric(ishape) && any(ishape <= 0))
- stop("argument 'ishape' must be positive")
+ if (!is.Numeric(location))
+ stop("argument 'location' must be numeric")
+ if (is.Numeric(iscale) && any(iscale <= 0))
+ stop("argument 'iscale' must be positive")
+ if (is.Numeric(ishape) && any(ishape <= 0))
+ stop("argument 'ishape' must be positive")
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
- new("vglmff",
- blurb = c("Pareto(II) distribution F(y)=1-[1+(y - ", location,
- ")/scale]^(-shape),",
- "\n", " y > ",
- location, ", scale > 0, shape > 0,\n",
- "Links: ", namesof("scale", lscale, earg = escale ), ", ",
- namesof("shape", lshape, earg = eshape ), "\n",
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
+
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+
+
+ new("vglmff",
+ blurb = c("Pareto(II) distribution F(y)=1-[1+(y - ", location,
+ ")/scale]^(-shape),",
+ "\n", " y > ",
+ location, ", scale > 0, shape > 0,\n",
+ "Links: ", namesof("scale", lscale, earg = escale), ", ",
+ namesof("shape", lshape, earg = eshape), "\n",
"Mean: location + scale * NA"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("the response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("scale", .lscale, earg = .escale, tag = FALSE),
- namesof("shape", .lshape, earg = .eshape, tag = FALSE))
- extra$location = location = .location
- if (any(y <= location))
- stop("the response must have values > than the 'location' argument")
- if (!length(etastart)) {
- scale.init = if (length( .iscale)) .iscale else NULL
- shape.init = if (length( .ishape)) .ishape else NULL
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+ predictors.names <-
+ c(namesof("scale", .lscale , earg = .escale , tag = FALSE),
+ namesof("shape", .lshape , earg = .eshape , tag = FALSE))
+
+ extra$location = location = .location
+
+ if (any(y <= location))
+ stop("the response must have values > than the 'location' argument")
+
+ if (!length(etastart)) {
+ scale.init = if (length( .iscale )) .iscale else NULL
+ shape.init = if (length( .ishape )) .ishape else NULL
if (!length(shape.init) || !length(scale.init)) {
probs = (1:4)/5
scale.init.0 = 1
- ytemp = quantile(x=log(y-location+scale.init.0), probs=probs)
- fittemp = lsfit(x=log1p(-probs), y = ytemp, intercept = TRUE)
+ ytemp = quantile(x = log(y-location+scale.init.0),
+ probs = probs)
+ fittemp = lsfit(x = log1p(-probs), y = ytemp,
+ intercept = TRUE)
if (!length(shape.init))
shape.init = max(-1/fittemp$coef["X"], 0.01)
if (!length(scale.init))
@@ -8856,73 +9989,73 @@ rparetoI = function(n, scale = 1, shape = 1)
}
etastart=cbind(
theta2eta(rep(scale.init, length.out = n),
- .lscale, earg = .escale),
+ .lscale , earg = .escale ),
theta2eta(rep(shape.init, length.out = n),
- .lshape, earg = .eshape))
+ .lshape , earg = .eshape ))
}
}), list( .location = location, .lscale = lscale,
.escale = escale, .eshape = eshape,
.lshape = lshape, .iscale = iscale, .ishape = ishape ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
location + Scale * NA
}, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
last = eval(substitute(expression({
- misc$link = c("scale" = .lscale, "shape" = .lshape)
- misc$earg = list("scale" = .escale, "shape" = .eshape)
+ misc$link = c("scale" = .lscale , "shape" = .lshape)
+ misc$earg = list("scale" = .escale , "shape" = .eshape )
misc$location = extra$location # Use this for prediction
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
zedd = (y - location) / Scale
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- sum(w * dparetoII(x = y, location = location, scale=Scale,
+ sum(c(w) * dparetoII(x = y, location = location, scale=Scale,
shape = shape, log = TRUE))
}
}, list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
vfamily = c("paretoII"),
deriv = eval(substitute(expression({
location = extra$location
- Scale = eta2theta(eta[, 1], .lscale, earg = .escale)
- shape = eta2theta(eta[, 2], .lshape, earg = .eshape)
+ Scale = eta2theta(eta[, 1], .lscale , earg = .escale )
+ shape = eta2theta(eta[, 2], .lshape , earg = .eshape )
zedd = (y - location) / Scale
temp100 = 1 + zedd
dl.dscale = (shape - (1+shape) / temp100) / (1 * Scale)
dl.dshape = -log(temp100) + 1/shape
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
c(w) * cbind(dl.dscale * dscale.deta,
dl.dshape * dshape.deta)
}), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))),
+ .escale = escale, .eshape = eshape))),
weight = eval(substitute(expression({
d2scale.deta2 = shape / (Scale^2 * (shape+2))
d2shape.deta2 = 1 / shape^2
d2ss.deta2 = -1 / (Scale * (shape+1))
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M)] = dscale.deta^2 * d2scale.deta2
- wz[,iam(2,2,M)] = dshape.deta^2 * d2shape.deta2
- wz[,iam(1,2,M)] = dscale.deta * dshape.deta * d2ss.deta2
+ wz[, iam(1, 1, M)] = dscale.deta^2 * d2scale.deta2
+ wz[, iam(2, 2, M)] = dshape.deta^2 * d2shape.deta2
+ wz[, iam(1, 2, M)] = dscale.deta * dshape.deta * d2ss.deta2
c(w) * wz
- }), list( .lscale = lscale, .lshape = lshape,
- .escale = escale, .eshape = eshape ))))
+ }), list( .lscale = lscale, .lshape = lshape,
+ .escale = escale, .eshape = eshape))))
}
-dpareto = function(x, location, shape, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dpareto <- function(x, location, shape, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -8939,7 +10072,7 @@ dpareto = function(x, location, shape, log = FALSE) {
}
-ppareto = function(q, location, shape) {
+ppareto <- function(q, location, shape) {
L = max(length(q), length(location), length(shape))
q = rep(q, length.out = L);
@@ -8953,7 +10086,7 @@ ppareto = function(q, location, shape) {
}
-qpareto = function(p, location, shape) {
+qpareto <- function(p, location, shape) {
if (any(p <= 0) || any(p >= 1))
stop("argument 'p' must be between 0 and 1")
@@ -8964,7 +10097,7 @@ qpareto = function(p, location, shape) {
}
-rpareto = function(n, location, shape) {
+rpareto <- function(n, location, shape) {
ans = location / runif(n)^(1/shape)
ans[location <= 0] = NaN
ans[shape <= 0] = NaN
@@ -8973,82 +10106,97 @@ rpareto = function(n, location, shape) {
- pareto1 = function(lshape = "loge", earg = list(), location = NULL) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (is.Numeric(location) && location <= 0)
- stop("argument 'location' must be positive")
- if (!is.list(earg)) earg = list()
+ pareto1 <- function(lshape = "loge", location = NULL) {
+ if (is.Numeric(location) && location <= 0)
+ stop("argument 'location' must be positive")
- new("vglmff",
- blurb = c("Pareto distribution f(y) = shape * location^shape / y^(shape+1),",
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+
+
+ earg <- eshape
+
+
+ new("vglmff",
+ blurb = c("Pareto distribution ",
+ "f(y) = shape * location^shape / y^(shape+1),",
" 0<location<y, shape>0\n",
"Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
"Mean: location*shape/(shape-1) for shape>1"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("shape", .lshape, earg = .earg, tag = FALSE)
- locationhat = if (!length( .location)) {
- locationEstimated = TRUE
- min(y) # - .smallno
- } else {
- locationEstimated = FALSE
- .location
- }
- if (any(y < locationhat))
- stop("the value of location is too high (requires 0 < location < min(y))")
- extra$location = locationhat
- extra$locationEstimated = locationEstimated
- if (!length(etastart)) {
- k.init = (y + 1/8) / (y - locationhat + 1/8)
- etastart = theta2eta(k.init, .lshape, earg = .earg )
- }
- }), list( .lshape = lshape, .earg = earg,
- .location = location ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- k = eta2theta(eta, .lshape, earg = .earg )
- location = extra$location
- ifelse(k > 1, k * location / (k-1), NA)
- }, list( .lshape = lshape, .earg = earg ))),
- last = eval(substitute(expression({
- misc$link = c(k = .lshape)
- misc$earg = list(k = .earg )
- misc$location = extra$location # Use this for prediction
- }), list( .lshape = lshape, .earg = earg ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- k = eta2theta(eta, .lshape, earg = .earg )
- location = extra$location
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
+ initialize = eval(substitute(expression({
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
- sum(w * (log(k) + k * log(location) - (k+1) * log(y )))
- }
- }, list( .lshape = lshape, .earg = earg ))),
- vfamily = c("pareto1"),
- deriv = eval(substitute(expression({
- location = extra$location
- k = eta2theta(eta, .lshape, earg = .earg )
- dl.dk = 1/k + log(location/y)
- dk.deta = dtheta.deta(k, .lshape, earg = .earg )
- c(w) * dl.dk * dk.deta
- }), list( .lshape = lshape, .earg = earg ))),
- weight = eval(substitute(expression({
- ed2l.dk2 = 1 / k^2
- wz = c(w) * dk.deta^2 * ed2l.dk2
- wz
- }), list( .lshape = lshape, .earg = earg ))))
+
+ predictors.names <-
+ namesof("shape", .lshape , earg = .earg , tag = FALSE)
+
+
+ locationhat = if (!length( .location)) {
+ locationEstimated = TRUE
+ min(y) # - .smallno
+ } else {
+ locationEstimated = FALSE
+ .location
+ }
+ if (any(y < locationhat))
+ stop("the value of location is too high ",
+ "(requires 0 < location < min(y))")
+ extra$location = locationhat
+ extra$locationEstimated = locationEstimated
+
+ if (!length(etastart)) {
+ k.init = (y + 1/8) / (y - locationhat + 1/8)
+ etastart <- theta2eta(k.init, .lshape , earg = .earg )
+ }
+ }), list( .lshape = lshape, .earg = earg,
+ .location = location ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ k = eta2theta(eta, .lshape , earg = .earg )
+ location = extra$location
+ ifelse(k > 1, k * location / (k-1), NA)
+ }, list( .lshape = lshape, .earg = earg ))),
+ last = eval(substitute(expression({
+ misc$link = c(k = .lshape)
+ misc$earg = list(k = .earg )
+
+ misc$location = extra$location # Use this for prediction
+ }), list( .lshape = lshape, .earg = earg ))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ k = eta2theta(eta, .lshape , earg = .earg )
+ location = extra$location
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+
+
+ sum(c(w) * (log(k) + k * log(location) - (k+1) * log(y )))
+ }
+ }, list( .lshape = lshape, .earg = earg ))),
+ vfamily = c("pareto1"),
+ deriv = eval(substitute(expression({
+ location = extra$location
+ k = eta2theta(eta, .lshape , earg = .earg )
+ dl.dk = 1/k + log(location/y)
+ dk.deta = dtheta.deta(k, .lshape , earg = .earg )
+ c(w) * dl.dk * dk.deta
+ }), list( .lshape = lshape, .earg = earg ))),
+ weight = eval(substitute(expression({
+ ed2l.dk2 = 1 / k^2
+ wz = c(w) * dk.deta^2 * ed2l.dk2
+ wz
+ }), list( .lshape = lshape, .earg = earg ))))
}
-dtpareto = function(x, lower, upper, shape, log = FALSE) {
+dtpareto <- function(x, lower, upper, shape, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -9082,7 +10230,7 @@ dtpareto = function(x, lower, upper, shape, log = FALSE) {
}
-ptpareto = function(q, lower, upper, shape) {
+ptpareto <- function(q, lower, upper, shape) {
if (!is.Numeric(q))
stop("bad input for argument 'q'")
@@ -9107,7 +10255,7 @@ ptpareto = function(q, lower, upper, shape) {
}
-qtpareto = function(p, lower, upper, shape) {
+qtpareto <- function(p, lower, upper, shape) {
if (!is.Numeric(p, positive = TRUE))
stop("bad input for argument 'p'")
if (max(p) >= 1)
@@ -9122,7 +10270,7 @@ qtpareto = function(p, lower, upper, shape) {
}
-rtpareto = function(n, lower, upper, shape) {
+rtpareto <- function(n, lower, upper, shape) {
ans = qtpareto(p = runif(n), lower = lower, upper = upper, shape = shape)
ans[lower <= 0] = NaN
@@ -9134,77 +10282,88 @@ rtpareto = function(n, lower, upper, shape) {
- tpareto1 = function(lower, upper, lshape = "loge", earg = list(),
- ishape = NULL, imethod = 1) {
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
+ tpareto1 <- function(lower, upper, lshape = "loge",
+ ishape = NULL, imethod = 1) {
- if (!is.Numeric(lower, positive = TRUE, allowable.length = 1))
- stop("bad input for argument 'lower'")
- if (!is.Numeric(upper, positive = TRUE, allowable.length = 1))
- stop("bad input for argument 'upper'")
- if (lower >= upper)
- stop("lower < upper is required")
+ if (!is.Numeric(lower, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'lower'")
+ if (!is.Numeric(upper, positive = TRUE, allowable.length = 1))
+ stop("bad input for argument 'upper'")
+ if (lower >= upper)
+ stop("lower < upper is required")
- if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
- stop("bad input for argument 'ishape'")
- if (!is.list(earg)) earg = list()
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
+ if (length(ishape) && !is.Numeric(ishape, positive = TRUE))
+ stop("bad input for argument 'ishape'")
- new("vglmff",
- blurb = c("Truncated Pareto distribution f(y) = shape * lower^shape /",
- "(y^(shape+1) * (1-(lower/upper)^shape)),",
- " 0 < lower < y < upper < Inf, shape>0\n",
- "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
- "Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
- " ((1-shape) * (1-(lower/upper)^shape))"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names = namesof("shape", .lshape, earg = .earg,
- tag = FALSE)
- if (any(y <= .lower))
- stop("the value of argument 'lower' is too high ",
- "(requires '0 < lower < min(y)')")
- extra$lower = .lower
- if (any(y >= .upper))
- stop("the value of argument 'upper' is too low ",
- "(requires 'max(y) < upper')")
- extra$upper = .upper
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
+ earg <- eshape
- if (!length(etastart)) {
- shape.init = if (is.Numeric( .ishape)) 0 * y + .ishape else
- if ( .imethod == 2) {
- 0 * y + median(rep((y + 1/8) / (y - .lower + 1/8), times=w))
- } else {
- tpareto1.Loglikfun = function(shape, y, x, w, extraargs) {
- myratio = .lower / .upper
- sum(w * (log(shape) + shape * log( .lower) -
- (shape+1) * log(y) - log1p(-myratio^shape)))
- }
- shape.grid = 2^((-4):4)
- try.this = getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
- y = y, x = x, w = w)
- try.this = rep(try.this, length.out = n)
- try.this
- }
- etastart = theta2eta(shape.init, .lshape, earg = .earg )
+
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+
+
+ new("vglmff",
+ blurb = c("Truncated Pareto distribution f(y) = shape * lower^shape /",
+ "(y^(shape+1) * (1-(lower/upper)^shape)),",
+ " 0 < lower < y < upper < Inf, shape>0\n",
+ "Link: ", namesof("shape", lshape, earg = earg), "\n", "\n",
+ "Mean: shape*lower^shape*(upper^(1-shape)-lower^(1-shape)) /",
+ " ((1-shape) * (1-(lower/upper)^shape))"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+
+
+ predictors.names <- namesof("shape", .lshape , earg = .earg ,
+ tag = FALSE)
+ if (any(y <= .lower))
+ stop("the value of argument 'lower' is too high ",
+ "(requires '0 < lower < min(y)')")
+
+ extra$lower = .lower
+ if (any(y >= .upper))
+ stop("the value of argument 'upper' is too low ",
+ "(requires 'max(y) < upper')")
+ extra$upper = .upper
+
+ if (!length(etastart)) {
+ shape.init = if (is.Numeric( .ishape )) 0 * y + .ishape else
+ if ( .imethod == 2) {
+ 0 * y + median(rep((y + 1/8) / (y - .lower + 1/8), times = w))
+ } else {
+ tpareto1.Loglikfun <- function(shape, y, x, w, extraargs) {
+ myratio = .lower / .upper
+ sum(c(w) * (log(shape) + shape * log( .lower) -
+ (shape+1) * log(y) - log1p(-myratio^shape)))
}
- }), list( .lshape = lshape, .earg = earg,
- .ishape = ishape,
- .imethod = imethod,
- .lower = lower, .upper = upper ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta, .lshape, earg = .earg )
- myratio = .lower / .upper
- constprop = shape * .lower^shape / (1 - myratio^shape)
- constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
- }, list( .lshape = lshape, .earg = earg,
+ shape.grid = 2^((-4):4)
+ try.this = getMaxMin(shape.grid, objfun = tpareto1.Loglikfun,
+ y = y, x = x, w = w)
+ try.this = rep(try.this, length.out = n)
+ try.this
+ }
+ etastart <- theta2eta(shape.init, .lshape , earg = .earg )
+ }
+ }), list( .lshape = lshape, .earg = earg,
+ .ishape = ishape,
+ .imethod = imethod,
+ .lower = lower, .upper = upper ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ shape = eta2theta(eta, .lshape , earg = .earg )
+ myratio = .lower / .upper
+ constprop = shape * .lower^shape / (1 - myratio^shape)
+ constprop * ( .upper^(1-shape) - .lower^(1-shape)) / (1-shape)
+ }, list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
last = eval(substitute(expression({
misc$link = c(shape = .lshape)
@@ -9216,23 +10375,24 @@ rtpareto = function(n, lower, upper, shape) {
.lower = lower, .upper = upper ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta, .lshape, earg = .earg )
+ shape = eta2theta(eta, .lshape , earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else {
- ans = sum(w * dtpareto(x = y, lower = .lower , upper = .upper ,
- shape = shape, log = TRUE))
+ ans = sum(c(w) * dtpareto(x = y, lower = .lower ,
+ upper = .upper ,
+ shape = shape, log = TRUE))
ans
}
}, list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
vfamily = c("tpareto1"),
deriv = eval(substitute(expression({
- shape = eta2theta(eta, .lshape, earg = .earg )
+ shape = eta2theta(eta, .lshape , earg = .earg )
myratio = .lower / .upper
myratio2 = myratio^shape
tmp330 = myratio2 * log(myratio) / (1 - myratio2)
dl.dshape = 1 / shape + log( .lower) - log(y) + tmp330
- dshape.deta = dtheta.deta(shape, .lshape, earg = .earg )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .earg )
c(w) * dl.dshape * dshape.deta
}), list( .lshape = lshape, .earg = earg,
.lower = lower, .upper = upper ))),
@@ -9247,47 +10407,56 @@ rtpareto = function(n, lower, upper, shape) {
-erf = function(x)
+erf <- function(x)
2 * pnorm(x * sqrt(2)) - 1
-erfc = function(x)
+erfc <- function(x)
2 * pnorm(x * sqrt(2), lower.tail = FALSE)
- wald <- function(link.lambda = "loge", earg = list(), init.lambda = NULL)
+ wald <- function(link.lambda = "loge", init.lambda = NULL)
{
- if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
- link.lambda = as.character(substitute(link.lambda))
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Standard Wald distribution\n\n",
- "f(y) = sqrt(lambda/(2*pi*y^3)) * exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
- "\n",
- "Link: ",
- namesof("lambda", link.lambda, earg = earg), "\n",
- "Mean: ", "1\n",
- "Variance: 1 / lambda"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (any(y <= 0)) stop("Require the response to have positive values")
- predictors.names =
- namesof("lambda", .link.lambda, earg = .earg, short = TRUE)
- if (!length(etastart)) {
- initlambda = if (length( .init.lambda)) .init.lambda else
- 1 / (0.01 + (y-1)^2)
- initlambda = rep(initlambda, length.out = n)
- etastart =
- cbind(theta2eta(initlambda,
- link = .link.lambda , earg = .earg ))
- }
- }), list( .link.lambda = link.lambda, .earg = earg,
- .init.lambda=init.lambda ))),
- linkinv = function(eta, extra = NULL) {
- 0*eta + 1
- },
+ link.lambda <- as.list(substitute(link.lambda))
+ earg <- link2list(link.lambda)
+ link.lambda <- attr(earg, "function.name")
+
+
+
+ new("vglmff",
+ blurb = c("Standard Wald distribution\n\n",
+ "f(y) = sqrt(lambda/(2*pi*y^3)) * ",
+ "exp(-lambda*(y-1)^2/(2*y)), y&lambda>0",
+ "\n",
+ "Link: ",
+ namesof("lambda", link.lambda, earg = earg), "\n",
+ "Mean: ", "1\n",
+ "Variance: 1 / lambda"),
+ initialize = eval(substitute(expression({
+
+ w.y.check(w = w, y = y,
+ Is.positive.y = TRUE,
+ ncol.w.max = 1, ncol.y.max = 1)
+
+
+ predictors.names <-
+ namesof("lambda", .link.lambda, earg = .earg , short = TRUE)
+
+
+ if (!length(etastart)) {
+ initlambda = if (length( .init.lambda)) .init.lambda else
+ 1 / (0.01 + (y-1)^2)
+ initlambda = rep(initlambda, length.out = n)
+ etastart <-
+ cbind(theta2eta(initlambda,
+ link = .link.lambda , earg = .earg ))
+ }
+ }), list( .link.lambda = link.lambda, .earg = earg,
+ .init.lambda=init.lambda ))),
+ linkinv = function(eta, extra = NULL) {
+ 0*eta + 1
+ },
last = eval(substitute(expression({
misc$link = c(lambda = .link.lambda )
misc$earg = list(lambda = .earg )
@@ -9297,7 +10466,7 @@ erfc = function(x)
lambda = eta2theta(eta, link=.link.lambda, earg = .earg )
if (residuals) stop("loglikelihood residuals ",
"not implemented yet") else
- sum(w * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
+ sum(c(w) * (0.5 * log(lambda/(2*pi*y^3)) - lambda * (y-1)^2 / (2*y)))
}, list( .link.lambda = link.lambda, .earg = earg ))),
vfamily = "wald",
deriv = eval(substitute(expression({
@@ -9313,99 +10482,114 @@ erfc = function(x)
}
- expexp = function(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = 1.1, iscale = NULL, # ishape cannot be 1
- tolerance = 1.0e-6,
- zero = NULL) {
+ expexp <- function(lshape = "loge", lscale = "loge",
+ ishape = 1.1, iscale = NULL, # ishape cannot be 1
+ tolerance = 1.0e-6,
+ zero = NULL) {
+
+
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
+ stop("bad input for argument 'zero'")
+
+ if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
+ tolerance > 1.0e-2)
+ stop("bad input for argument 'tolerance'")
+ if (!is.Numeric(ishape, positive = TRUE))
+ stop("bad input for argument 'ishape'")
+
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
+
+ ishape[ishape == 1] = 1.1 # Fails in @deriv
+
- if (mode(lshape) != "character" && mode(lshape) != "name")
- lshape = as.character(substitute(lshape))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
+ lshape <- as.list(substitute(lshape))
+ eshape <- link2list(lshape)
+ lshape <- attr(eshape, "function.name")
- if (!is.Numeric(tolerance, positive = TRUE, allowable.length = 1) ||
- tolerance > 1.0e-2)
- stop("bad input for argument 'tolerance'")
- if (!is.Numeric(ishape, positive = TRUE))
- stop("bad input for argument 'ishape'")
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- ishape[ishape == 1] = 1.1 # Fails in @deriv
+ new("vglmff",
+ blurb = c("Exponentiated Exponential Distribution\n",
+ "Links: ",
+ namesof("shape", lshape, earg = eshape), ", ",
+ namesof("scale", lscale, earg = escale),"\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ constraints = eval(substitute(expression({
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
+ }), list( .zero = zero ))),
+ initialize = eval(substitute(expression({
- if (!is.list(escale)) escale = list()
- if (!is.list(eshape)) eshape = list()
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
- new("vglmff",
- blurb = c("Exponentiated Exponential Distribution\n",
- "Links: ",
- namesof("shape", lshape, earg = eshape), ", ",
- namesof("scale", lscale, earg = escale),"\n",
- "Mean: (digamma(shape+1)-digamma(1))/scale"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("shape", .lshape, earg = .eshape, short = TRUE),
- namesof("scale", .lscale, earg = .escale, short = TRUE))
- if (!length(etastart)) {
+
+
+ predictors.names <-
+ c(namesof("shape", .lshape , earg = .eshape, short = TRUE),
+ namesof("scale", .lscale , earg = .escale , short = TRUE))
+
+
+ if (!length(etastart)) {
shape.init = if (!is.Numeric( .ishape, positive = TRUE))
stop("argument 'ishape' must be positive") else
rep( .ishape, length.out = n)
- scale.init = if (length( .iscale))
+ scale.init = if (length( .iscale ))
rep( .iscale, length.out = n) else
(digamma(shape.init+1) - digamma(1)) / (y+1/8)
scale.init = rep(weighted.mean(scale.init, w = w),
length.out = n)
- etastart = cbind(theta2eta(shape.init, .lshape, earg = .eshape),
- theta2eta(scale.init, .lscale, earg = .escale))
+ etastart <-
+ cbind(theta2eta(shape.init, .lshape , earg = .eshape ),
+ theta2eta(scale.init, .lscale , earg = .escale ))
}
- }), list( .lshape = lshape, .lscale = lscale, .iscale = iscale, .ishape = ishape,
- .eshape = eshape, .escale = escale ))),
+ }), list( .lshape = lshape, .lscale = lscale,
+ .iscale = iscale, .ishape = ishape,
+ .eshape = eshape, .escale = escale))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
(digamma(shape+1)-digamma(1)) / scale
}, list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c("shape" = .lshape, "scale" = .lscale)
- misc$earg = list("shape" = .eshape, "scale" = .escale)
- misc$expected = TRUE
- }), list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w * (log(shape) + log(scale) +
- (shape-1)*log1p(-exp(-scale*y)) - scale*y))
- }, list( .lscale = lscale, .lshape = lshape,
- .eshape = eshape, .escale = escale ))),
- vfamily = c("expexp"),
- deriv = eval(substitute(expression({
- shape = eta2theta(eta[, 1], .lshape, earg = .eshape)
- scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
- dl.dshape = 1/shape + log1p(-exp(-scale*y))
- dscale.deta = dtheta.deta(scale, .lscale, earg = .escale)
- dshape.deta = dtheta.deta(shape, .lshape, earg = .eshape)
- c(w) * cbind(dl.dshape * dshape.deta,
- dl.dscale * dscale.deta)
- }), list( .lshape = lshape, .lscale = lscale,
- .eshape = eshape, .escale = escale ))),
- weight = eval(substitute(expression({
+ .eshape = eshape, .escale = escale))),
+ last = eval(substitute(expression({
+ misc$link = c("shape" = .lshape , "scale" = .lscale)
+ misc$earg = list("shape" = .eshape, "scale" = .escale )
+
+ misc$expected = TRUE
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(c(w) * (log(shape) + log(scale) +
+ (shape-1)*log1p(-exp(-scale*y)) - scale*y))
+ }, list( .lscale = lscale, .lshape = lshape,
+ .eshape = eshape, .escale = escale))),
+ vfamily = c("expexp"),
+ deriv = eval(substitute(expression({
+ shape = eta2theta(eta[, 1], .lshape , earg = .eshape )
+ scale = eta2theta(eta[, 2], .lscale , earg = .escale )
+
+ dl.dscale = 1/scale + (shape-1)*y*exp(-scale*y) / (-expm1(-scale*y)) - y
+ dl.dshape = 1/shape + log1p(-exp(-scale*y))
+
+ dscale.deta = dtheta.deta(scale, .lscale , earg = .escale )
+ dshape.deta = dtheta.deta(shape, .lshape , earg = .eshape )
+
+ c(w) * cbind(dl.dshape * dshape.deta,
+ dl.dscale * dscale.deta)
+ }), list( .lshape = lshape, .lscale = lscale,
+ .eshape = eshape, .escale = escale))),
+ weight = eval(substitute(expression({
d11 = 1 / shape^2 # True for all shape
d22 = d12 = rep(as.numeric(NA), length.out = n)
index2 = abs(shape - 2) > .tolerance # index2 = shape != 1
@@ -9437,351 +10621,325 @@ erfc = function(x)
d12[!index1] = -sum(1/(2 + (0:largeno))^2) / Scale
}
wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M)] = dshape.deta^2 * d11
- wz[,iam(2,2,M)] = dscale.deta^2 * d22
- wz[,iam(1,2,M)] = dscale.deta * dshape.deta * d12
+ wz[, iam(1, 1, M)] = dshape.deta^2 * d11
+ wz[, iam(2, 2, M)] = dscale.deta^2 * d22
+ wz[, iam(1, 2, M)] = dscale.deta * dshape.deta * d12
c(w) * wz
- }), list( .tolerance=tolerance ))))
+ }), list( .tolerance = tolerance ))))
}
- expexp1 = function(lscale = "loge",
- escale = list(),
- iscale = NULL,
- ishape = 1) {
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
+ expexp1 <- function(lscale = "loge",
+ iscale = NULL,
+ ishape = 1) {
- if (!is.list(escale)) escale = list()
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
- new("vglmff",
- blurb = c("Exponentiated Exponential Distribution",
- " (profile likelihood estimation)\n",
- "Links: ",
- namesof("scale", lscale, earg = escale), "\n",
- "Mean: (digamma(shape+1)-digamma(1))/scale"),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- namesof("scale", .lscale, earg = .escale, short = TRUE)
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
- if (length(w) != n ||
- !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
- stop("argument 'weights' must be a vector of positive integers")
- if (!intercept.only)
- stop("this family function only works for an ",
- "intercept-only, i.e., y ~ 1")
- extra$yvector = y
- extra$sumw = sum(w)
- extra$w = w
- if (!length(etastart)) {
- shape.init = if (!is.Numeric( .ishape, positive = TRUE))
- stop("argument 'ishape' must be positive") else
- rep( .ishape, length.out = n)
- scaleinit = if (length( .iscale))
- rep( .iscale, length.out = n) else
- (digamma(shape.init+1) - digamma(1)) / (y+1/8)
- etastart = cbind(theta2eta(scaleinit, .lscale, earg = .escale))
- }
- }), list( .lscale = lscale, .iscale = iscale, .ishape = ishape,
- .escale = escale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- scale = eta2theta(eta, .lscale, earg = .escale)
- temp7 = -expm1(-scale*extra$yvector)
- shape = -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
- (digamma(shape+1)-digamma(1)) / scale
- }, list( .lscale = lscale,
- .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c("scale" = .lscale)
- misc$earg = list("scale" = .escale)
- temp7 = -expm1(-scale*y)
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- misc$shape = shape # Store the ML estimate here
- misc$pooled.weight = pooled.weight
- }), list( .lscale = lscale, .escale = escale ))),
- loglikelihood= eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- scale = eta2theta(eta, .lscale, earg = .escale)
- temp7 = -expm1(-scale*y)
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else
- sum(w * (log(shape) + log(scale) +
- (shape-1)*log1p(-exp(-scale*y)) - scale*y))
- }, list( .lscale = lscale, .escale = escale ))),
- vfamily = c("expexp1"),
- deriv = eval(substitute(expression({
- scale = eta2theta(eta, .lscale, earg = .escale)
- temp6 = exp(-scale*y)
- temp7 = 1-temp6
- shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
- d1 = 1/scale + (shape-1)*y*temp6/temp7 - y
- c(w) * cbind(d1 * dtheta.deta(scale, .lscale, earg = .escale))
- }), list( .lscale = lscale, .escale = escale ))),
- weight = eval(substitute(expression({
- d11 = 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
- (y*temp7+temp6) - y*temp6 / (log(temp7))^2)
- wz = matrix(0, n, dimm(M))
- wz[,iam(1,1,M)] = dtheta.deta(scale, .lscale, earg = .escale)^2 * d11 -
- d2theta.deta2(scale, .lscale, earg = .escale) * d1
-
- if (FALSE && intercept.only) {
- sumw = sum(w)
- for(ii in 1:ncol(wz))
- wz[,ii] = sum(wz[,ii]) / sumw
- pooled.weight = TRUE
- wz = c(w) * wz # Put back the weights
- } else
- pooled.weight = FALSE
- c(w) * wz
- }), list( .lscale = lscale, .escale = escale ))))
-}
+ new("vglmff",
+ blurb = c("Exponentiated Exponential Distribution",
+ " (profile likelihood estimation)\n",
+ "Links: ",
+ namesof("scale", lscale, earg = escale), "\n",
+ "Mean: (digamma(shape+1)-digamma(1))/scale"),
+ initialize = eval(substitute(expression({
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1, ncol.y.max = 1)
-betaffqn.control <- function(save.weight = TRUE, ...)
-{
- list(save.weight = save.weight)
+
+
+ predictors.names <-
+ namesof("scale", .lscale , earg = .escale , short = TRUE)
+
+ if (length(w) != n ||
+ !is.Numeric(w, integer.valued = TRUE, positive = TRUE))
+ stop("argument 'weights' must be a vector of positive integers")
+
+ if (!intercept.only)
+ stop("this family function only works for an ",
+ "intercept-only, i.e., y ~ 1")
+ extra$yvector = y
+ extra$sumw = sum(w)
+ extra$w = w
+
+ if (!length(etastart)) {
+ shape.init = if (!is.Numeric( .ishape, positive = TRUE))
+ stop("argument 'ishape' must be positive") else
+ rep( .ishape, length.out = n)
+ scaleinit = if (length( .iscale ))
+ rep( .iscale, length.out = n) else
+ (digamma(shape.init+1) - digamma(1)) / (y+1/8)
+ etastart <-
+ cbind(theta2eta(scaleinit, .lscale , earg = .escale ))
+ }
+ }), list( .lscale = lscale, .iscale = iscale, .ishape = ishape,
+ .escale = escale))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ scale = eta2theta(eta, .lscale , earg = .escale )
+ temp7 = -expm1(-scale*extra$yvector)
+ shape = -extra$sumw / sum(extra$w*log(temp7)) # \gamma(\theta)
+ (digamma(shape+1)-digamma(1)) / scale
+ }, list( .lscale = lscale,
+ .escale = escale))),
+ last = eval(substitute(expression({
+ misc$link = c("scale" = .lscale)
+ misc$earg = list("scale" = .escale )
+
+ temp7 = -expm1(-scale*y)
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ misc$shape = shape # Store the ML estimate here
+ misc$pooled.weight = pooled.weight
+ }), list( .lscale = lscale, .escale = escale))),
+ loglikelihood= eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ scale = eta2theta(eta, .lscale , earg = .escale )
+ temp7 = -expm1(-scale*y)
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else
+ sum(c(w) * (log(shape) + log(scale) +
+ (shape-1)*log1p(-exp(-scale*y)) - scale*y))
+ }, list( .lscale = lscale, .escale = escale))),
+ vfamily = c("expexp1"),
+ deriv = eval(substitute(expression({
+ scale = eta2theta(eta, .lscale , earg = .escale )
+
+ temp6 = exp(-scale*y)
+ temp7 = 1-temp6
+ shape = -extra$sumw / sum(w*log(temp7)) # \gamma(\theta)
+ d1 = 1/scale + (shape-1)*y*temp6/temp7 - y
+
+ c(w) * cbind(d1 * dtheta.deta(scale, .lscale , earg = .escale ))
+ }), list( .lscale = lscale, .escale = escale))),
+ weight = eval(substitute(expression({
+ d11 = 1/scale^2 + y*(temp6/temp7^2) * ((shape-1) *
+ (y*temp7+temp6) - y*temp6 / (log(temp7))^2)
+
+ wz = matrix(0, n, dimm(M))
+ wz[, iam(1, 1, M)] =
+ dtheta.deta(scale, .lscale , earg = .escale )^2 * d11 -
+ d2theta.deta2(scale, .lscale , earg = .escale ) * d1
+
+ if (FALSE && intercept.only) {
+ sumw = sum(w)
+ for(ii in 1:ncol(wz))
+ wz[, ii] = sum(wz[, ii]) / sumw
+ pooled.weight = TRUE
+ wz = c(w) * wz # Put back the weights
+ } else
+ pooled.weight = FALSE
+ c(w) * wz
+ }), list( .lscale = lscale, .escale = escale))))
}
- if (FALSE)
- betaffqn = function(link = "loge", earg = list(),
- i1 = NULL, i2 = NULL, trim=0.05, A=0, B=1)
-{
- if (mode(link) != "character" && mode(link) != "name")
- link = as.character(substitute(link))
- if (!is.Numeric(A, allowable.length = 1) ||
- !is.Numeric(B, allowable.length = 1) ||
- A >= B)
- stop("A must be < B, and both must be of length one")
- stdbeta = (A == 0 && B == 1) # stdbeta==T iff standard beta distribution
- if (!is.list(earg)) earg = list()
- new("vglmff",
- blurb = c("Two-parameter Beta distribution\n",
- if (stdbeta)
- "y^(shape1-1) * (1-y)^(shape2-1), 0<=y <= 1, shape1>0, shape2>0\n\n"
- else
- paste("(y-",A,")^(shape1-1) * (", B,
- "-y)^(shape2-1), ",A,"<=y <= ", B,
- " shape1>0, shape2>0\n\n", sep = ""),
- "Links: ",
- namesof("shape1", link, earg = earg), ", ",
- namesof("shape2", link, earg = earg)),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- if (min(y) <= .A || max(y) >= .B)
- stop("data not within (A, B)")
- predictors.names =
- c(namesof("shape1", .link, earg = .earg, short = TRUE),
- namesof("shape2", .link, earg = .earg, short = TRUE))
- if (is.numeric( .i1) && is.numeric( .i2)) {
- vec = c( .i1, .i2)
- vec = c(theta2eta(vec[1], .link, earg = .earg ),
- theta2eta(vec[2], .link, earg = .earg ))
- etastart = matrix(vec, n, 2, byrow= TRUE)
- }
- # For QN update below
- if (length(w) != n || !is.Numeric(w, positive = TRUE))
- stop("weights must be a vector of positive weights")
- if (!length(etastart)) {
- mu1d = mean(y, trim=.trim)
- uu = (mu1d-.A) / ( .B - .A)
- DD = ( .B - .A)^2
- pinit = uu^2 * (1-uu)*DD/var(y) - uu # But var(y) is not robust
- qinit = pinit * (1-uu) / uu
- etastart = matrix(theta2eta(c(pinit,qinit), .link, earg = .earg ),
- n,2,byrow = TRUE)
- }
- }), list( .link = link, .earg = earg,
- .i1 = i1, .i2 = i2, .trim = trim, .A = A, .B = B ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- shapes = eta2theta(eta, .link, earg = .earg )
- .A + ( .B-.A) * shapes[, 1] / (shapes[, 1] + shapes[, 2])
- }, list( .link = link, .earg = earg, .A = A, .B = B ))),
- last = eval(substitute(expression({
- misc$link = c(shape1 = .link, shape2 = .link)
- misc$earg = list(shape1 = .earg, shape2 = .earg )
- misc$limits = c( .A, .B)
- misc$expected = FALSE
- misc$BFGS = TRUE
- }), list( .link = link, .earg = earg, .A = A, .B = B ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL){
- shapes = eta2theta(eta, .link, earg = .earg )
- temp = lbeta(shapes[, 1], shapes[, 2])
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * ((shapes[, 1]-1)*log(y-.A) +
- (shapes[, 2]-1)*log( .B-y) - temp -
- (shapes[, 1]+shapes[, 2]-1)*log( .B-.A )))
- }
- }, list( .link = link, .earg = earg, .A = A, .B = B ))),
- vfamily = "betaffqn",
- deriv = eval(substitute(expression({
- shapes = eta2theta(eta, .link, earg = .earg )
- dshapes.deta = dtheta.deta(shapes, .link, earg = .earg )
- dl.dshapes = cbind(log(y-.A), log( .B-y)) - digamma(shapes) +
- digamma(shapes[, 1] + shapes[, 2]) - log( .B - .A)
- if (iter == 1) {
- etanew = eta
- } else {
- derivold = derivnew
- etaold = etanew
- etanew = eta
- }
- derivnew = c(w) * dl.dshapes * dshapes.deta
- derivnew
- }), list( .link = link, .earg = earg, .A = A, .B = B ))),
- weight = expression({
- if (iter == 1) {
- wznew = cbind(matrix(w, n, M), matrix(0, n, dimm(M)-M))
- } else {
- wzold = wznew
- wznew = qnupdate(w = w, wzold=wzold,
- dderiv=(derivold - derivnew),
- deta=etanew-etaold, M = M,
- trace=trace) # weights incorporated in args
- }
- wznew
- }))
-}
+ logistic2 <- function(llocation = "identity",
+ lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ imethod = 1, zero = -2) {
+ ilocat <- ilocation
+ if (!is.Numeric(imethod, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE) ||
+ imethod > 2)
+ stop("argument 'imethod' must be 1 or 2")
+ if (length(zero) &&
+ !is.Numeric(zero, integer.valued = TRUE))
+ stop("bad input for argument 'zero'")
+ if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
+ stop("bad input for argument 'iscale'")
- logistic2 = function(llocation = "identity",
- lscale = "loge",
- elocation = list(),
- escale = list(),
- ilocation = NULL, iscale = NULL,
- imethod = 1, zero = NULL) {
- if (mode(llocation) != "character" && mode(llocation) != "name")
- llocation = as.character(substitute(llocation))
- if (mode(lscale) != "character" && mode(lscale) != "name")
- lscale = as.character(substitute(lscale))
- if (!is.Numeric(imethod, allowable.length = 1,
- integer.valued = TRUE, positive = TRUE) ||
- imethod > 2)
- stop("argument 'imethod' must be 1 or 2")
- if (length(zero) &&
- !is.Numeric(zero, integer.valued = TRUE, positive = TRUE))
- stop("bad input for argument 'zero'")
- if (length(iscale) && !is.Numeric(iscale, positive = TRUE))
- stop("bad input for argument 'iscale'")
+ llocat <- as.list(substitute(llocation))
+ elocat <- link2list(llocat)
+ llocat <- attr(elocat, "function.name")
- if (!is.list(elocation)) elocation = list()
- if (!is.list(escale)) escale = list()
+ lscale <- as.list(substitute(lscale))
+ escale <- link2list(lscale)
+ lscale <- attr(escale, "function.name")
- new("vglmff",
- blurb = c("Two-parameter logistic distribution\n\n",
- "Links: ",
- namesof("location", llocation, earg = elocation), ", ",
- namesof("scale", lscale, earg = escale),
- "\n", "\n",
- "Mean: location", "\n",
- "Variance: (pi*scale)^2 / 3"),
- constraints = eval(substitute(expression({
- constraints = cm.zero.vgam(constraints, x, .zero, M)
- }), list( .zero = zero ))),
- initialize = eval(substitute(expression({
- if (ncol(cbind(y)) != 1)
- stop("response must be a vector or a one-column matrix")
- predictors.names =
- c(namesof("location", .llocation, earg = .elocation, tag = FALSE),
- namesof("scale", .lscale, earg = .escale, tag = FALSE))
- if (!length(etastart)) {
- if ( .imethod == 1) {
- location.init = y
- scale.init = sqrt(3) * sd(y) / pi
- } else {
- location.init = median(rep(y, w))
- scale.init = sqrt(3) *
- sum(w*(y-location.init)^2) / (sum(w)*pi)
- }
- location.init = if (length( .ilocation))
- rep( .ilocation, length.out = n) else
- rep(location.init, length.out = n)
- if ( .llocation == "loge")
- location.init = abs(location.init) + 0.001
- scale.init = if (length( .iscale))
- rep( .iscale, length.out = n) else
- rep(1, length.out = n)
- etastart = cbind(
- theta2eta(location.init, .llocation, earg = .elocation),
- theta2eta(scale.init, .lscale, earg = .escale))
- }
- }), list( .imethod = imethod,
- .elocation = elocation, .escale = escale,
- .llocation = llocation, .lscale = lscale,
- .ilocation = ilocation, .iscale = iscale ))),
- linkinv = eval(substitute(function(eta, extra = NULL) {
- eta2theta(eta[, 1], .llocation, earg = .elocation)
- }, list( .llocation = llocation,
- .elocation = elocation, .escale = escale ))),
- last = eval(substitute(expression({
- misc$link = c(location = .llocation, scale = .lscale)
- misc$earg = list(location = .elocation, scale = .escale)
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
- loglikelihood = eval(substitute(
- function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- location = eta2theta(eta[, 1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- if (residuals) stop("loglikelihood residuals ",
- "not implemented yet") else {
- sum(w * dlogis(x = y, location = location,
- scale = Scale, log = TRUE))
+ new("vglmff",
+ blurb = c("Two-parameter logistic distribution\n\n",
+ "Links: ",
+ namesof("location", llocat, earg = elocat), ", ",
+ namesof("scale", lscale, earg = escale),
+ "\n", "\n",
+ "Mean: location", "\n",
+ "Variance: (pi * scale)^2 / 3"),
+ constraints = eval(substitute(expression({
+ dotzero <- .zero
+ Musual <- 2
+ eval(negzero.expression)
+ }), list( .zero = zero ))),
+
+ infos = eval(substitute(function(...) {
+ list(Musual = 2,
+ zero = .zero )
+ }, list( .zero = zero ))),
+
+ initialize = eval(substitute(expression({
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ ncoly <- ncol(y)
+ Musual <- 2
+ extra$ncoly <- ncoly
+ extra$Musual <- Musual
+ M <- Musual * ncoly
+
+
+
+ mynames1 <- paste("location", if (ncoly > 1) 1:ncoly else "", sep = "")
+ mynames2 <- paste("scale", if (ncoly > 1) 1:ncoly else "", sep = "")
+ predictors.names <-
+ c(namesof(mynames1, .llocat , earg = .elocat , tag = FALSE),
+ namesof(mynames2, .lscale , earg = .escale , tag = FALSE))[
+ interleave.VGAM(M, M = Musual)]
+
+
+ if (!length(etastart)) {
+ if ( .imethod == 1) {
+ locat.init = y
+ scale.init = sqrt(3) * apply(y, 2, sd) / pi
+ } else {
+ locat.init = scale.init = NULL
+ for(ii in 1:ncoly) {
+ locat.init = c(locat.init, median(rep(y[, ii], w[, ii])))
+ scale.init = c(scale.init, sqrt(3) * sum(w[, ii] *
+ (y[, ii] - locat.init[ii])^2) / (sum(w[, ii]) * pi))
}
- }, list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
- vfamily = c("logistic2"),
- deriv = eval(substitute(expression({
- location = eta2theta(eta[, 1], .llocation, earg = .elocation)
- Scale = eta2theta(eta[, 2], .lscale, earg = .escale)
- zedd = (y-location) / Scale
- ezedd = exp(-zedd)
- dl.dlocation = (1-ezedd) / ((1 + ezedd) * Scale)
- dlocation.deta = dtheta.deta(location, .llocation, earg = .elocation)
- dl.dscale = zedd * (1-ezedd) / ((1 + ezedd) * Scale) - 1/Scale
- dscale.deta = dtheta.deta(Scale, .lscale, earg = .escale)
- c(w) * cbind(dl.dlocation * dlocation.deta,
- dl.dscale * dscale.deta)
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))),
- weight = eval(substitute(expression({
- d2l.location2 = 1 / (3*Scale^2)
- d2l.dscale2 = (3 + pi^2) / (9*Scale^2)
- wz = matrix(as.numeric(NA), nrow=n, ncol=M) # diagonal
- wz[,iam(1,1,M)] = d2l.location2 * dlocation.deta^2
- wz[,iam(2,2,M)] = d2l.dscale2 * dscale.deta^2
- c(w) * wz
- }), list( .llocation = llocation, .lscale = lscale,
- .elocation = elocation, .escale = escale ))))
+ }
+ locat.init = matrix(if (length( .ilocat )) .ilocat else
+ locat.init, n, ncoly, byrow = TRUE)
+ if ( .llocat == "loge")
+ locat.init = abs(locat.init) + 0.001
+
+
+ scale.init = matrix(if (length( .iscale )) .iscale else
+ scale.init, n, ncoly, byrow = TRUE)
+
+ etastart <- cbind(
+ theta2eta(locat.init, .llocat , earg = .elocat ),
+ theta2eta(scale.init, .lscale , earg = .escale ))[,
+ interleave.VGAM(M, M = Musual)]
+ }
+ }), list( .imethod = imethod,
+ .elocat = elocat, .escale = escale,
+ .llocat = llocat, .lscale = lscale,
+ .ilocat = ilocat, .iscale = iscale ))),
+ linkinv = eval(substitute(function(eta, extra = NULL) {
+ M <- ncol(eta)
+ Musual <- 2
+ ncoly <- M / Musual
+ eta2theta(eta[, (1:ncoly) * Musual - 1], .llocat , earg = .elocat )
+ }, list( .llocat = llocat,
+ .elocat = elocat ))),
+
+ last = eval(substitute(expression({
+ Musual <- extra$Musual
+ misc$link <-
+ c(rep( .llocat , length = ncoly),
+ rep( .lscale , length = ncoly))[interleave.VGAM(M, M = Musual)]
+ temp.names <- c(mynames1, mynames2)[
+ interleave.VGAM(M, M = Musual)]
+ names(misc$link) <- temp.names
+
+ misc$earg <- vector("list", M)
+ names(misc$earg) <- temp.names
+ for(ii in 1:ncoly) {
+ misc$earg[[Musual*ii-1]] <- .elocat
+ misc$earg[[Musual*ii ]] <- .escale
+ }
+
+ misc$Musual <- Musual
+ misc$imethod <- .imethod
+ misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
+ }), list( .imethod = imethod,
+ .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale))),
+ loglikelihood = eval(substitute(
+ function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
+ M <- ncol(eta)
+ Musual <- 2
+ ncoly <- M / Musual
+
+ locat = eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
+ if (residuals) stop("loglikelihood residuals ",
+ "not implemented yet") else {
+ sum(c(w) * dlogis(x = y, location = locat,
+ scale = Scale, log = TRUE))
+ }
+ }, list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale))),
+ vfamily = c("logistic2"),
+ deriv = eval(substitute(expression({
+ Musual <- 2
+ ncoly <- M / Musual
+
+ locat = eta2theta(eta[, (1:ncoly)*Musual-1], .llocat , earg = .elocat )
+ Scale = eta2theta(eta[, (1:ncoly)*Musual ], .lscale , earg = .escale )
+
+ zedd = (y - locat) / Scale
+ ezedd = exp(-zedd)
+ dl.dlocat = (-expm1(-zedd)) / ((1 + ezedd) * Scale)
+ dl.dscale = zedd * (-expm1(-zedd)) / ((1 + ezedd) * Scale) -
+ 1 / Scale
+
+ dlocat.deta = dtheta.deta(locat, .llocat , earg = .elocat )
+ dscale.deta = dtheta.deta(Scale, .lscale , earg = .escale )
+
+ c(w) * cbind(dl.dlocat * dlocat.deta,
+ dl.dscale * dscale.deta)[, interleave.VGAM(M, M = Musual)]
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale))),
+ weight = eval(substitute(expression({
+ ned2l.dlocat2 = 1 / (3 * Scale^2)
+ ned2l.dscale2 = (3 + pi^2) / (9 * Scale^2)
+
+ wz = matrix(as.numeric(NA), nrow = n, ncol = M) # diagonal
+ wz[, (1:ncoly) * Musual - 1] = ned2l.dlocat2 * dlocat.deta^2
+ wz[, (1:ncoly) * Musual ] = ned2l.dscale2 * dscale.deta^2
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
+ }), list( .llocat = llocat, .lscale = lscale,
+ .elocat = elocat, .escale = escale))))
}
@@ -9790,26 +10948,31 @@ betaffqn.control <- function(save.weight = TRUE, ...)
- negbinomial.size = function(size = Inf,
- lmu = "loge",
- emu = list(),
- imu = NULL,
- quantile.probs = 0.75,
- imethod = 1,
- shrinkage.init = 0.95, zero = NULL)
+ negbinomial.size <- function(size = Inf,
+ lmu = "loge",
+ imu = NULL,
+ probs.y = 0.75,
+ imethod = 1,
+ shrinkage.init = 0.95, zero = NULL)
{
+
+
if (any(size <= 0))
stop("bad input for argument 'size'")
if (any(is.na(size)))
stop("bad input for argument 'size'")
- if (mode(lmu) != "character" && mode(lmu) != "name")
- lmu = as.character(substitute(lmu))
+
+ lmu <- as.list(substitute(lmu))
+ emu <- link2list(lmu)
+ lmu <- attr(emu, "function.name")
+
+
if (length(imu) && !is.Numeric(imu, positive = TRUE))
@@ -9825,6 +10988,8 @@ betaffqn.control <- function(save.weight = TRUE, ...)
stop("bad input for argument 'shrinkage.init'")
+
+
ans =
new("vglmff",
@@ -9851,16 +11016,26 @@ betaffqn.control <- function(save.weight = TRUE, ...)
Musual <- 1
if (any(y < 0))
- stop("negative values not allowed for the 'negbinomial' family")
- if (any(round(y) != y))
- stop("integer-values only allowed for the 'negbinomial' family")
+ stop("negative values not allowed for the 'negbinomial.size' family")
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ Is.nonnegative.y = TRUE,
+ Is.integer.y = TRUE,
+ ncol.w.max = Inf, ncol.y.max = Inf,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
- y = as.matrix(y)
M = Musual * ncol(y)
- NOS = ncoly = ncol(y) # Number of species
- predictors.names =
- c(namesof(if (NOS == 1) "mu" else paste("mu", 1:NOS, sep = ""),
- .lmu, earg = .emu, tag = FALSE))
+ NOS = ncoly = ncol(y) # Number of species
+ mynames1 <- paste("mu", if (NOS > 1) 1:NOS else "", sep = "")
+ predictors.names <-
+ namesof(mynames1, .lmu , earg = .emu , tag = FALSE)
if (is.numeric( .mu.init ))
@@ -9871,20 +11046,22 @@ betaffqn.control <- function(save.weight = TRUE, ...)
mu.init = y
for(iii in 1:ncol(y)) {
use.this = if ( .imethod == 1) {
- weighted.mean(y[, iii], w) + 1/16
+ weighted.mean(y[, iii], w[, iii]) + 1/16
} else if ( .imethod == 3) {
- c(quantile(y[, iii], probs = .quantile.probs) + 1/16)
+ c(quantile(y[, iii], probs = .probs.y) + 1/16)
} else {
- median(y[, iii]) + 1/16
+ median(y[, iii]) + 1/16
}
if (is.numeric( .mu.init )) {
- mu.init[, iii] = MU.INIT[, iii]
+ mu.init[, iii] = MU.INIT[, iii]
} else {
medabsres = median(abs(y[, iii] - use.this)) + 1/32
- allowfun = function(z, maxtol=1) sign(z)*pmin(abs(z), maxtol)
+ allowfun <- function(z, maxtol = 1)
+ sign(z)*pmin(abs(z), maxtol)
mu.init[, iii] = use.this + (1 - .sinit) *
- allowfun(y[, iii] - use.this, maxtol = medabsres)
+ allowfun(y[, iii] - use.this,
+ maxtol = medabsres)
mu.init[, iii] = abs(mu.init[, iii]) + 1 / 1024
}
@@ -9893,17 +11070,23 @@ betaffqn.control <- function(save.weight = TRUE, ...)
kmat = matrix( .size , n, NOS, byrow = TRUE)
- newemu = if ( .lmu == "nbcanlink") {
- c(list(size = kmat), .emu)
- } else {
- .emu
+
+
+
+ newemu <- .emu
+ if ( .lmu == "nbcanlink") {
+ newemu$size <- kmat
}
- etastart = cbind(theta2eta(mu.init , link = .lmu , earg = newemu ))
+
+
+
+ etastart <-
+ cbind(theta2eta(mu.init , link = .lmu , earg = newemu ))
}
}), list( .lmu = lmu,
.emu = emu,
.mu.init = imu,
- .size = size, .quantile.probs = quantile.probs,
+ .size = size, .probs.y = probs.y,
.sinit = shrinkage.init,
.zero = zero, .imethod = imethod ))),
@@ -9915,29 +11098,32 @@ betaffqn.control <- function(save.weight = TRUE, ...)
kmat = matrix( .size , n, NOS, byrow = TRUE)
- newemu = if ( .lmu == "nbcanlink") {
- c(list(size = kmat), .emu)
- } else {
- .emu
+
+
+
+
+ newemu <- .emu
+ if ( .lmu == "nbcanlink") {
+ newemu$size <- kmat
}
+
eta2theta(eta, .lmu , earg = newemu)
}, list( .lmu = lmu,
- .size = size,
- .emu = emu ))),
+ .emu = emu,
+ .size = size ))),
last = eval(substitute(expression({
+ misc$link <- rep( .lmu , length = NOS)
+ names(misc$link) <- mynames1
- temp0303 = c(rep( .lmu, length = NOS))
- names(temp0303) = c(if (NOS == 1) "mu" else
- paste("mu", 1:NOS, sep = ""))
- misc$link = temp0303 # Already named
misc$earg = vector("list", M)
- names(misc$earg) = names(misc$link)
+ names(misc$earg) = mynames1
for(ii in 1:NOS) {
- misc$earg[[ii]] = newemu
+ misc$earg[[ii]] = newemu
}
+
misc$imethod = .imethod
misc$expected = TRUE
misc$shrinkage.init = .sinit
@@ -9984,17 +11170,20 @@ betaffqn.control <- function(save.weight = TRUE, ...)
NOS = M = ncol(eta)
kmat = matrix( .size , n, M, byrow = TRUE)
- newemu = if ( .lmu == "nbcanlink") {
- c(list(size = kmat), .emu)
- } else {
- .emu
+
+
+ newemu <- .emu
+ if ( .lmu == "nbcanlink") {
+ newemu$size <- kmat
}
+
dl.dmu = y/mu - (y+kmat)/(kmat+mu)
dl.dmu[!is.finite(dl.dmu)] = (y/mu)[!is.finite(dl.dmu)] - 1
- dmu.deta = dtheta.deta(mu, .lmu ,
- earg = c(list(wrt.eta = 1), newemu)) # eta1
+ if ( .lmu == "nbcanlink")
+ newemu$wrt.eta <- 1
+ dmu.deta = dtheta.deta(mu, .lmu , earg = newemu) # eta1
myderiv = c(w) * dl.dmu * dmu.deta
myderiv
@@ -10005,11 +11194,12 @@ betaffqn.control <- function(save.weight = TRUE, ...)
weight = eval(substitute(expression({
wz = matrix(as.numeric(NA), n, M) # wz is 'diagonal'
- ed2l.dmu2 = 1 / mu - 1 / (mu + kmat)
- wz = dmu.deta^2 * ed2l.dmu2
+ ned2l.dmu2 = 1 / mu - 1 / (mu + kmat)
+ wz = dmu.deta^2 * ned2l.dmu2
- c(w) * wz
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = NOS)
}), list( .lmu = lmu ))))
ans
@@ -10021,7 +11211,3 @@ betaffqn.control <- function(save.weight = TRUE, ...)
-
-
-
-
diff --git a/R/family.zeroinf.R b/R/family.zeroinf.R
index 9ee9fc6..7012fd1 100644
--- a/R/family.zeroinf.R
+++ b/R/family.zeroinf.R
@@ -12,7 +12,7 @@
-dzanegbin = function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
+dzanegbin <- function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
log = FALSE) {
if (length(munb)) {
if (length(prob))
@@ -20,15 +20,15 @@ dzanegbin = function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
prob <- size / (size + munb)
}
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
LLL = max(length(x), length(pobs0), length(prob), length(size))
- if (length(x) != LLL) x = rep(x, len = LLL)
- if (length(pobs0) != LLL) pobs0 = rep(pobs0, len = LLL);
- if (length(prob) != LLL) prob = rep(prob, len = LLL)
- if (length(size) != LLL) size = rep(size, len = LLL);
+ if (length(x) != LLL) x <- rep(x, len = LLL)
+ if (length(pobs0) != LLL) pobs0 <- rep(pobs0, len = LLL);
+ if (length(prob) != LLL) prob <- rep(prob, len = LLL)
+ if (length(size) != LLL) size <- rep(size, len = LLL);
ans = rep(0.0, len = LLL)
if (!is.Numeric(pobs0) || any(pobs0 < 0) || any(pobs0 > 1))
@@ -37,24 +37,24 @@ dzanegbin = function(x, size, prob = NULL, munb = NULL, pobs0 = 0,
stop("argument 'prob' must be in [0,Inf)")
if (!is.Numeric(size, positive = TRUE))
stop("argument 'size' must be in [0,Inf)")
- index0 = x == 0
+ index0 <- x == 0
if (log.arg) {
- ans[ index0] = log(pobs0[index0])
- ans[!index0] = log1p(-pobs0[!index0]) +
- dposnegbin(x[!index0], prob = prob[!index0],
- size = size[!index0], log = TRUE)
+ ans[ index0] <- log(pobs0[index0])
+ ans[!index0] <- log1p(-pobs0[!index0]) +
+ dposnegbin(x[!index0], prob = prob[!index0],
+ size = size[!index0], log = TRUE)
} else {
- ans[ index0] = pobs0[index0]
- ans[!index0] = (1-pobs0[!index0]) * dposnegbin(x[!index0],
- prob = prob[!index0], size = size[!index0])
+ ans[ index0] <- pobs0[index0]
+ ans[!index0] <- (1-pobs0[!index0]) * dposnegbin(x[!index0],
+ prob = prob[!index0], size = size[!index0])
}
ans
}
-pzanegbin = function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
+pzanegbin <- function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -80,7 +80,7 @@ pzanegbin = function(q, size, prob = NULL, munb = NULL, pobs0 = 0) {
}
-qzanegbin = function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
+qzanegbin <- function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -107,7 +107,7 @@ qzanegbin = function(p, size, prob = NULL, munb = NULL, pobs0 = 0) {
}
-rzanegbin = function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
+rzanegbin <- function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -131,8 +131,8 @@ rzanegbin = function(n, size, prob = NULL, munb = NULL, pobs0 = 0) {
-dzapois = function(x, lambda, pobs0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dzapois <- function(x, lambda, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -161,7 +161,7 @@ dzapois = function(x, lambda, pobs0 = 0, log = FALSE) {
-pzapois = function(q, lambda, pobs0 = 0) {
+pzapois <- function(q, lambda, pobs0 = 0) {
LLL = max(length(q), length(lambda), length(pobs0))
if (length(q) != LLL) q = rep(q, len = LLL);
if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
@@ -178,7 +178,7 @@ pzapois = function(q, lambda, pobs0 = 0) {
}
-qzapois = function(p, lambda, pobs0 = 0) {
+qzapois <- function(p, lambda, pobs0 = 0) {
LLL = max(length(p), length(lambda), length(pobs0))
if (length(p) != LLL) p = rep(p, len = LLL);
if (length(lambda) != LLL) lambda = rep(lambda, len = LLL);
@@ -195,7 +195,7 @@ qzapois = function(p, lambda, pobs0 = 0) {
}
-rzapois = function(n, lambda, pobs0 = 0) {
+rzapois <- function(n, lambda, pobs0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -214,11 +214,11 @@ rzapois = function(n, lambda, pobs0 = 0) {
-dzipois = function(x, lambda, pstr0 = 0, log = FALSE) {
+dzipois <- function(x, lambda, pstr0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -252,7 +252,7 @@ dzipois = function(x, lambda, pstr0 = 0, log = FALSE) {
}
-pzipois = function(q, lambda, pstr0 = 0) {
+pzipois <- function(q, lambda, pstr0 = 0) {
LLL = max(length(pstr0), length(lambda), length(q))
if (length(pstr0) != LLL) pstr0 = rep(pstr0, len = LLL);
@@ -272,7 +272,7 @@ pzipois = function(q, lambda, pstr0 = 0) {
}
-qzipois = function(p, lambda, pstr0 = 0) {
+qzipois <- function(p, lambda, pstr0 = 0) {
LLL = max(length(p), length(lambda), length(pstr0))
ans =
@@ -308,7 +308,7 @@ qzipois = function(p, lambda, pstr0 = 0) {
}
-rzipois = function(n, lambda, pstr0 = 0) {
+rzipois <- function(n, lambda, pstr0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
@@ -343,17 +343,22 @@ rzipois = function(n, lambda, pstr0 = 0) {
- yip88 = function(link.lambda = "loge", n.arg = NULL) {
+ yip88 <- function(link = "loge", n.arg = NULL) {
+
+
+
+ link <- as.list(substitute(link))
+ earg <- link2list(link)
+ link <- attr(earg, "function.name")
- if (mode(link.lambda) != "character" && mode(link.lambda) != "name")
- link.lambda = as.character(substitute(link.lambda))
new("vglmff",
blurb = c("Zero-inflated Poisson (based on Yip (1988))\n\n",
- "Link: ", namesof("lambda", link.lambda), "\n",
+ "Link: ",
+ namesof("lambda", link, earg), "\n",
"Variance: (1 - pstr0) * lambda"),
first = eval(substitute(expression({
zero <- y == 0
@@ -365,6 +370,7 @@ rzipois = function(n, lambda, pstr0 = 0) {
"(it need not be specified anyway)")
warning("trimming out the zero observations")
+
axa.save = attr(x, "assign")
x = x[!zero,, drop = FALSE]
attr(x, "assign") = axa.save # Don't lose these!!
@@ -382,10 +388,17 @@ rzipois = function(n, lambda, pstr0 = 0) {
if (sum(w) > narg)
stop("sum(w) > narg")
- predictors.names = namesof("lambda", .link.lambda, tag = FALSE)
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1)
+
+
+ predictors.names <-
+ namesof("lambda", .link, list(theta = NULL), tag = FALSE)
+
if (!length(etastart)) {
lambda.init = rep(median(y), length = length(y))
- etastart = theta2eta(lambda.init, .link.lambda)
+ etastart = theta2eta(lambda.init, .link , earg = .earg )
}
if (length(extra)) {
extra$sumw = sum(w)
@@ -393,19 +406,20 @@ rzipois = function(n, lambda, pstr0 = 0) {
} else {
extra = list(sumw = sum(w), narg = narg)
}
- }), list( .link.lambda = link.lambda, .n.arg = n.arg ))),
+ }), list( .link = link, .earg = earg, .n.arg = n.arg ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- lambda = eta2theta(eta, .link.lambda)
+ lambda = eta2theta(eta, .link, .earg)
temp5 = exp(-lambda)
pstr0 = (1 - temp5 - extra$sumw/extra$narg) / (1 - temp5)
if (any(pstr0 <= 0))
stop("non-positive value(s) of pstr0")
- (1-pstr0) * lambda
- }, list( .link.lambda = link.lambda ))),
+ (1 - pstr0) * lambda
+ }, list( .link = link, .earg = earg ))),
last = eval(substitute(expression({
- misc$link = c(lambda = .link.lambda )
+ misc$link = c(lambda = .link )
+ misc$earg = list(lambda = .earg )
if (intercept.only) {
suma = extra$sumw
@@ -413,52 +427,52 @@ rzipois = function(n, lambda, pstr0 = 0) {
pstr0 = if (pstr0 < 0 || pstr0 > 1) NA else pstr0
misc$pstr0 = pstr0
}
- }), list( .link.lambda = link.lambda ))),
+ }), list( .link = link, .earg = earg ))),
loglikelihood = eval(substitute(function(mu, y, w, residuals = FALSE,
eta, extra = NULL) {
- lambda = eta2theta(eta, .link.lambda)
+ lambda = eta2theta(eta, .link)
temp5 = exp(-lambda)
pstr0 = (1 - temp5 - extra$sumw / extra$narg) / (1 - temp5)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE))
+ sum(c(w) * dzipois(x = y, pstr0 = pstr0, lambda = lambda, log = TRUE))
}
- }, list( .link.lambda = link.lambda ))),
+ }, list( .link = link, .earg = earg ))),
vfamily = c("yip88"),
deriv = eval(substitute(expression({
- lambda = eta2theta(eta, .link.lambda)
+ lambda = eta2theta(eta, .link , earg = .earg )
temp5 = exp(-lambda)
dl.dlambda = -1 + y/lambda - temp5/(1-temp5)
- dlambda.deta = dtheta.deta(lambda, .link.lambda)
+ dlambda.deta = dtheta.deta(lambda, .link , earg = .earg )
w * dl.dlambda * dlambda.deta
- }), list( .link.lambda = link.lambda ))),
+ }), list( .link = link, .earg = earg ))),
weight = eval(substitute(expression({
- d2lambda.deta2 = d2theta.deta2(lambda, .link.lambda)
+ d2lambda.deta2 = d2theta.deta2(lambda, .link , earg = .earg )
d2l.dlambda2 = -y / lambda^2 + temp5 / (1 - temp5)^2
-w * (d2l.dlambda2*dlambda.deta^2 + dl.dlambda*d2lambda.deta2)
- }), list( .link.lambda = link.lambda ))))
+ }), list( .link = link, .earg = earg ))))
}
- zapoisson = function(lpobs0 = "logit", llambda = "loge",
- epobs0 = list(), elambda = list(), zero = NULL) {
+ zapoisson <- function(lpobs0 = "logit", llambda = "loge",
+ zero = NULL) {
- lpobs_0 = lpobs0
- epobs_0 = epobs0
- if (mode(lpobs_0) != "character" && mode(lpobs_0) != "name")
- lpobs_0 = as.character(substitute(lpobs_0))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
+ lpobs_0 <- as.list(substitute(lpobs0))
+ epobs_0 <- link2list(lpobs_0)
+ lpobs_0 <- attr(epobs_0, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
- if (!is.list(epobs_0)) epobs_0 = list()
- if (!is.list(elambda)) elambda = list()
new("vglmff",
blurb = c("Zero-altered Poisson ",
@@ -476,16 +490,25 @@ rzipois = function(n, lambda, pstr0 = 0) {
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 2
- y <- as.matrix(y)
- if (any(y != round(y )))
- stop("the response must be integer-valued")
if (any(y < 0))
stop("the response must not have negative values")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
extra$y0 = y0 = ifelse(y == 0, 1, 0)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
@@ -494,14 +517,15 @@ rzipois = function(n, lambda, pstr0 = 0) {
paste("pobs0", 1:ncoly, sep = "")
mynames2 = if (ncoly == 1) "lambda" else
paste("lambda", 1:ncoly, sep = "")
- predictors.names =
+ predictors.names <-
c(namesof(mynames1, .lpobs_0, earg = .epobs_0, tag = FALSE),
namesof(mynames2, .llambda, earg = .elambda, tag = FALSE))[
interleave.VGAM(Musual*NOS, M = Musual)]
if (!length(etastart)) {
etastart =
- cbind(theta2eta((0.5 + w*y0) / (1+w), .lpobs_0, earg = .epobs_0 ),
+ cbind(theta2eta((0.5 + w*y0) / (1+w),
+ .lpobs_0, earg = .epobs_0 ),
matrix(1, n, NOS)) # 1 here is any old value
for(spp. in 1:NOS) {
sthese = skip.these[, spp.]
@@ -527,17 +551,18 @@ rzipois = function(n, lambda, pstr0 = 0) {
}, list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
last = eval(substitute(expression({
+ misc$expected = TRUE
+ misc$multipleResponses <- TRUE
+
temp.names = c(rep( .lpobs_0 , len = NOS),
rep( .llambda , len = NOS))
temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
misc$link = temp.names
- misc$expected = TRUE
- misc$earg = vector("list", Musual * NOS)
-
names(misc$link) <-
- names(misc$earg) <-
- c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ c(mynames1, mynames2)[interleave.VGAM(Musual*NOS, M = Musual)]
+ misc$earg = vector("list", Musual * NOS)
+ names(misc$earg) <- names(misc$link)
for(ii in 1:NOS) {
misc$earg[[Musual*ii-1]] = .epobs_0
misc$earg[[Musual*ii ]] = .elambda
@@ -556,7 +581,7 @@ rzipois = function(n, lambda, pstr0 = 0) {
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzapois(x = y, pobs0 = pobs0, lambda = lambda, log = TRUE))
+ sum(c(w) * dzapois(x = y, pobs0 = pobs0, lambda = lambda, log = TRUE))
}
}, list( .lpobs_0 = lpobs_0, .llambda = llambda,
.epobs_0 = epobs_0, .elambda = elambda ))),
@@ -597,7 +622,7 @@ rzipois = function(n, lambda, pstr0 = 0) {
.epobs_0 = epobs_0, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, n, Musual*NOS)
+ wz = matrix(0.0, n, Musual * NOS)
@@ -629,6 +654,8 @@ rzipois = function(n, lambda, pstr0 = 0) {
wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
+
+
wz
}), list( .lpobs_0 = lpobs_0,
.epobs_0 = epobs_0 ))))
@@ -647,7 +674,6 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
zanegbinomial =
function(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
- epobs0 = list(), emunb = list(), esize = list(),
ipobs0 = NULL, isize = NULL,
zero = c(-1, -3),
imethod = 1,
@@ -681,16 +707,19 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
- if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
- lpobs0 = as.character(substitute(lpobs0))
- if (!is.list(epobs0)) epobs0 = list()
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
+ lmunb <- as.list(substitute(lmunb))
+ emunb <- link2list(lmunb)
+ lmunb <- attr(emunb, "function.name")
+
+ lsize <- as.list(substitute(lsize))
+ esize <- link2list(lsize)
+ lsize <- attr(esize, "function.name")
+
+ lpobs0 <- as.list(substitute(lpobs0))
+ epobs0 <- link2list(lpobs0)
+ lpobs0 <- attr(epobs0, "function.name")
+
@@ -711,45 +740,60 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 3
- y <- as.matrix(y)
- extra$NOS = NOS = ncoly = ncol(y) # Number of species
- M = Musual * ncoly #
- if (any(y != round(y)))
- stop("the response must be integer-valued")
if (any(y < 0))
stop("the response must not have negative values")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+ extra$NOS = NOS = ncoly = ncol(y) # Number of species
+ M = Musual * ncoly #
+
mynames1 = if (NOS == 1) "pobs0" else paste("pobs0", 1:NOS, sep = "")
mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- predictors.names =
+ predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
interleave.VGAM(Musual*NOS, M = Musual)]
- extra$y0 = y0 = ifelse(y == 0, 1, 0)
- extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
+ extra$y0 <- y0 <- ifelse(y == 0, 1, 0)
+ extra$skip.these <- skip.these <- matrix(as.logical(y0), n, NOS)
if (!length(etastart)) {
- mu.init = y
+ mu.init <- y
for(iii in 1:ncol(y)) {
- index.posy = (y[, iii] > 0)
- use.this = if ( .imethod == 2) {
- weighted.mean(y[index.posy, iii], w[index.posy])
+ index.posy <- (y[, iii] > 0)
+ if ( .imethod == 1) {
+ use.this <- weighted.mean(y[index.posy, iii],
+ w[index.posy, iii])
+ mu.init[ index.posy, iii] = (1 - .sinit ) * y[index.posy, iii] +
+ .sinit * use.this
+ mu.init[!index.posy, iii] = use.this
} else {
- median(rep(y[index.posy, iii], w[index.posy])) + 1/2
+ use.this <-
+ mu.init[, iii] <- (y[, iii] +
+ weighted.mean(y[index.posy, iii], w[index.posy, iii])) / 2
}
- mu.init[ index.posy, iii] = (1 - .sinit ) * y[index.posy, iii] +
- .sinit * use.this
- mu.init[!index.posy, iii] = use.this
+if (TRUE) {
max.use.this = 7 * use.this + 10
vecTF = (mu.init[, iii] > max.use.this)
if (any(vecTF))
mu.init[vecTF, iii] = max.use.this
+}
}
pnb0 = matrix(if (length( .ipobs0 )) .ipobs0 else -1,
@@ -765,9 +809,9 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
if ( is.Numeric( .isize )) {
kmat0 = matrix( .isize , nrow = n, ncol = ncoly, byrow = TRUE)
} else {
- posnegbinomial.Loglikfun = function(kmat, y, x, w, extraargs) {
+ posnegbinomial.Loglikfun <- function(kmat, y, x, w, extraargs) {
munb = extraargs
- sum(w * dposnegbin(x = y, munb = munb, size = kmat,
+ sum(c(w) * dposnegbin(x = y, munb = munb, size = kmat,
log = TRUE))
}
k.grid = 2^((-6):6)
@@ -777,8 +821,8 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
posy = y[index.posy, spp.]
kmat0[, spp.] = getMaxMin(k.grid,
objfun = posnegbinomial.Loglikfun,
- y = posy, x = x[index.posy,],
- w = w[index.posy],
+ y = posy, x = x[index.posy, ],
+ w = w[index.posy, spp.],
extraargs = mu.init[index.posy, spp.])
}
}
@@ -827,6 +871,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
misc$imethod = .imethod
misc$ipobs0 = .ipobs0
misc$isize = .isize
+ misc$multipleResponses <- TRUE
}), list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
.epobs0 = epobs0, .emunb = emunb, .esize = esize,
.ipobs0 = ipobs0, .isize = isize,
@@ -841,7 +886,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
kmat = eta2theta(eta[, Musual*(1:NOS) ], .lsize , earg = .esize )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
+ sum(c(w) * dzanegbin(x = y, pobs0 = phi0, munb = munb, size = kmat,
log = TRUE))
}
}, list( .lpobs0 = lpobs0, .lmunb = lmunb, .lsize = lsize,
@@ -999,6 +1044,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
wz[, Musual*(1:NOS)-2] = tmp200
+
wz
}), list( .lpobs0 = lpobs0,
.epobs0 = epobs0,
@@ -1015,7 +1061,7 @@ zanegbinomial.control <- function(save.weight = TRUE, ...)
if (FALSE)
-rposnegbin = function(n, munb, size) {
+rposnegbin <- function(n, munb, size) {
if (!is.Numeric(size, positive = TRUE))
stop("argument 'size' must be positive")
if (!is.Numeric(munb, positive = TRUE))
@@ -1036,7 +1082,7 @@ rposnegbin = function(n, munb, size) {
}
if (FALSE)
-dposnegbin = function(x, munb, size, log = FALSE) {
+dposnegbin <- function(x, munb, size, log = FALSE) {
if (!is.Numeric(size, positive = TRUE))
stop("argument 'size' must be positive")
if (!is.Numeric(munb, positive = TRUE))
@@ -1058,21 +1104,26 @@ dposnegbin = function(x, munb, size, log = FALSE) {
- zipoisson = function(lpstr0 = "logit", llambda = "loge",
- epstr0 = list(), elambda = list(),
- ipstr0 = NULL, ilambda = NULL,
- imethod = 1,
- shrinkage.init = 0.8, zero = NULL)
+ zipoisson <- function(lpstr0 = "logit", llambda = "loge",
+ ipstr0 = NULL, ilambda = NULL,
+ imethod = 1,
+ shrinkage.init = 0.8, zero = NULL)
{
+ ipstr00 <- ipstr0
+
+
+
+
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr00 <- link2list(lpstr0)
+ lpstr00 <- attr(epstr00, "function.name")
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
- if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
- lpstr0 = as.character(substitute(lpstr0))
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda = as.character(substitute(llambda))
- lpstr00 <- lpstr0
- epstr00 <- epstr0
- ipstr00 <- ipstr0
if (length(ipstr00))
if (!is.Numeric(ipstr00, positive = TRUE) ||
@@ -1082,8 +1133,6 @@ dposnegbin = function(x, munb, size, log = FALSE) {
if (!is.Numeric(ilambda, positive = TRUE))
stop("argument 'ilambda' values must be positive")
- if (!is.list(epstr00)) epstr00 = list()
- if (!is.list(elambda)) elambda = list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -1111,10 +1160,23 @@ dposnegbin = function(x, munb, size, log = FALSE) {
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- y <- as.matrix(y)
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
ncoly <- ncol(y)
Musual <- 2
@@ -1162,7 +1224,7 @@ dposnegbin = function(x, munb, size, log = FALSE) {
}
zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
- sum(w * dzipois(x = y, pstr0 = phival,
+ sum(c(w) * dzipois(x = y, pstr0 = phival,
lambda = extraargs$lambda,
log = TRUE))
}
@@ -1215,6 +1277,7 @@ dposnegbin = function(x, munb, size, log = FALSE) {
misc$Musual <- Musual
misc$imethod <- .imethod
misc$expected <- TRUE
+ misc$multipleResponses <- TRUE
misc$pobs0 = phimat + (1 - phimat) * exp(-lambda) # P(Y=0)
if (length(dimnames(y)[[2]]) > 0)
@@ -1229,7 +1292,7 @@ dposnegbin = function(x, munb, size, log = FALSE) {
lambda = eta2theta(eta[, c(FALSE, TRUE)], .llambda , earg = .elambda )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzipois(x = y, pstr0 = phimat, lambda = lambda,
+ sum(c(w) * dzipois(x = y, pstr0 = phimat, lambda = lambda,
log = TRUE))
}
}, list( .lpstr00 = lpstr00, .llambda = llambda,
@@ -1237,31 +1300,32 @@ dposnegbin = function(x, munb, size, log = FALSE) {
vfamily = c("zipoisson"),
deriv = eval(substitute(expression({
Musual <- 2
- phimat = eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
+ phimat <- eta2theta(eta[, c(TRUE, FALSE), drop = FALSE], .lpstr00 ,
earg = .epstr00 )
- lambda = eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
+ lambda <- eta2theta(eta[, c(FALSE, TRUE), drop = FALSE], .llambda ,
earg = .elambda )
- prob0 = phimat + (1 - phimat) * exp(-lambda)
- index0 = as.matrix(y == 0)
+ prob0 <- phimat + (1 - phimat) * exp(-lambda)
+ index0 <- as.matrix(y == 0)
+
+ dl.dphimat <- -expm1(-lambda) / prob0
+ dl.dphimat[!index0] <- -1 / (1 - phimat[!index0])
- dl.dphimat = -expm1(-lambda) / prob0
- dl.dphimat[!index0] = -1 / (1 - phimat[!index0])
- dl.dlambda = -(1 - phimat) * exp(-lambda) / prob0
- dl.dlambda[!index0] = (y[!index0] - lambda[!index0]) / lambda[!index0]
+ dl.dlambda <- -(1 - phimat) * exp(-lambda) / prob0
+ dl.dlambda[!index0] <- (y[!index0] - lambda[!index0]) / lambda[!index0]
- dphimat.deta = dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
- dlambda.deta = dtheta.deta(lambda, .llambda , earg = .elambda )
+ dphimat.deta <- dtheta.deta(phimat, .lpstr00 , earg = .epstr00 )
+ dlambda.deta <- dtheta.deta(lambda, .llambda , earg = .elambda )
- ans = c(w) * cbind(dl.dphimat * dphimat.deta,
- dl.dlambda * dlambda.deta)
+ ans <- c(w) * cbind(dl.dphimat * dphimat.deta,
+ dl.dlambda * dlambda.deta)
ans <- ans[, interleave.VGAM(M, M = Musual)]
if ( .llambda == "loge" && is.empty.list( .elambda ) &&
any(lambda[!index0] < .Machine$double.eps)) {
for(spp. in 1:(M / Musual)) {
- ans[!index0[, spp.], Musual * spp.] =
+ ans[!index0[, spp.], Musual * spp.] <-
w[!index0[, spp.]] *
(y[!index0[, spp.], spp.] - lambda[!index0[, spp.], spp.])
}
@@ -1271,30 +1335,34 @@ dposnegbin = function(x, munb, size, log = FALSE) {
}), list( .lpstr00 = lpstr00, .llambda = llambda,
.epstr00 = epstr00, .elambda = elambda ))),
weight = eval(substitute(expression({
- wz = matrix(0.0, nrow = n, ncol = M + M-1)
+ wz <- matrix(0.0, nrow = n, ncol = M + M-1)
- d2l.dphimat2 = -expm1(-lambda) / ((1 - phimat) * prob0)
- d2l.dlambda2 = (1 - phimat) / lambda -
- phimat * (1 - phimat) * exp(-lambda) / prob0
- d2l.dphimatlambda = -exp(-lambda) / prob0
+ ned2l.dphimat2 <- -expm1(-lambda) / ((1 - phimat) * prob0)
+ ned2l.dphimatlambda <- -exp(-lambda) / prob0
+ ned2l.dlambda2 <- (1 - phimat) / lambda -
+ phimat * (1 - phimat) * exp(-lambda) / prob0
- d2l.dphimat2 = as.matrix(d2l.dphimat2)
- d2l.dlambda2 = as.matrix(d2l.dlambda2)
- d2l.dphimatlambda = as.matrix(d2l.dphimatlambda)
+
+
+
+
+ ned2l.dphimat2 <- as.matrix(ned2l.dphimat2)
+ ned2l.dlambda2 <- as.matrix(ned2l.dlambda2)
+ ned2l.dphimatlambda <- as.matrix(ned2l.dphimatlambda)
for (ii in 1:(M / Musual)) {
wz[, iam(Musual * ii - 1, Musual * ii - 1, M)] <-
- d2l.dphimat2[, ii] * dphimat.deta[, ii]^2
+ ned2l.dphimat2[, ii] * dphimat.deta[, ii]^2
wz[, iam(Musual * ii , Musual * ii , M)] <-
- d2l.dlambda2[, ii] * dlambda.deta[, ii]^2
+ ned2l.dlambda2[, ii] * dlambda.deta[, ii]^2
wz[, iam(Musual * ii - 1, Musual * ii , M)] <-
- d2l.dphimatlambda[, ii] * dphimat.deta[, ii] * dlambda.deta[, ii]
-
+ ned2l.dphimatlambda[, ii] * dphimat.deta[, ii] * dlambda.deta[, ii]
}
- c(w) * wz
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .llambda = llambda, .elambda = elambda ))))
} # zipoisson
@@ -1306,18 +1374,21 @@ dposnegbin = function(x, munb, size, log = FALSE) {
- zibinomial = function(lpstr0 = "logit", lprob = "logit",
- epstr0 = list(), eprob = list(),
- ipstr0 = NULL,
- zero = 1, mv = FALSE, imethod = 1)
+ zibinomial <- function(lpstr0 = "logit", lprob = "logit",
+ ipstr0 = NULL,
+ zero = 1, mv = FALSE, imethod = 1)
{
if (as.logical(mv))
stop("argument 'mv' must be FALSE")
- if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
- lpstr0 = as.character(substitute(lpstr0))
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr0 <- link2list(lpstr0)
+ lpstr0 <- attr(epstr0, "function.name")
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
if (is.Numeric(ipstr0))
if (!is.Numeric(ipstr0, positive = TRUE) || any(ipstr0 >= 1))
@@ -1327,8 +1398,6 @@ dposnegbin = function(x, munb, size, log = FALSE) {
imethod > 2)
stop("argument 'imethod' must be 1 or 2")
- if (!is.list(epstr0)) epstr0 = list()
- if (!is.list(eprob )) eprob = list()
new("vglmff",
@@ -1336,19 +1405,19 @@ dposnegbin = function(x, munb, size, log = FALSE) {
"Links: ",
namesof("pstr0", lpstr0, earg = epstr0), ", ",
namesof("prob" , lprob , earg = eprob ), "\n",
- "Mean: (1 - pstr0) * prob / (1 - (1 - prob)^w)"),
+ "Mean: (1 - pstr0) * prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
- if (!all(w == 1))
- extra$orig.w = w
+ if (!all(w == 1))
+ extra$orig.w <- w
{
- NCOL = function (x)
- if (is.array(x) && length(dim(x)) > 1 ||
- is.data.frame(x)) ncol(x) else as.integer(1)
+ NCOL <- function (x)
+ if (is.array(x) && length(dim(x)) > 1 ||
+ is.data.frame(x)) ncol(x) else as.integer(1)
if (NCOL(y) == 1) {
if (is.factor(y)) y <- y != levels(y)[1]
@@ -1359,23 +1428,23 @@ dposnegbin = function(x, munb, size, log = FALSE) {
mustart = (0.5 + w * y) / (1.0 + w)
- no.successes = y
+ no.successes <- y
if (min(y) < 0)
- stop("Negative data not allowed!")
+ stop("Negative data not allowed!")
if (any(abs(no.successes - round(no.successes)) > 1.0e-8))
- stop("Number of successes must be integer-valued")
+ stop("Number of successes must be integer-valued")
} else if (NCOL(y) == 2) {
if (min(y) < 0)
- stop("Negative data not allowed!")
+ stop("Negative data not allowed!")
if (any(abs(y - round(y)) > 1.0e-8))
- stop("Count data must be integer-valued")
- y = round(y)
- nvec = y[, 1] + y[, 2]
- y = ifelse(nvec > 0, y[, 1] / nvec, 0)
- w = w * nvec
+ stop("Count data must be integer-valued")
+ y <- round(y)
+ nvec <- y[, 1] + y[, 2]
+ y <- ifelse(nvec > 0, y[, 1] / nvec, 0)
+ w <- w * nvec
if (!length(mustart) && !length(etastart))
- mustart = (0.5 + nvec * y) / (1 + nvec)
+ mustart <- (0.5 + nvec * y) / (1 + nvec)
} else {
stop("for the binomialff family, response 'y' must be a ",
"vector of 0 and 1's\n",
@@ -1387,17 +1456,21 @@ dposnegbin = function(x, munb, size, log = FALSE) {
}
+ if ( .imethod == 1)
+ mustart <- (mustart + y) / 2
+
+
- predictors.names =
+ predictors.names <-
c(namesof("pstr0", .lpstr0 , earg = .epstr0 , tag = FALSE),
namesof("prob" , .lprob , earg = .eprob , tag = FALSE))
- phi.init = if (length( .ipstr0 )) .ipstr0 else {
- prob0.est = sum(w[y == 0]) / sum(w)
+ phi.init <- if (length( .ipstr0 )) .ipstr0 else {
+ prob0.est <- sum(w[y == 0]) / sum(w)
if ( .imethod == 1) {
(prob0.est - (1 - mustart)^w) / (1 - (1 - mustart)^w)
} else {
@@ -1405,33 +1478,35 @@ dposnegbin = function(x, munb, size, log = FALSE) {
}
}
- phi.init[phi.init <= -0.10] = 0.50 # Lots of sample variation
- phi.init[phi.init <= 0.01] = 0.05 # Last resort
- phi.init[phi.init >= 0.99] = 0.95 # Last resort
+ phi.init[phi.init <= -0.10] <- 0.50 # Lots of sample variation
+ phi.init[phi.init <= 0.01] <- 0.05 # Last resort
+ phi.init[phi.init >= 0.99] <- 0.95 # Last resort
if ( length(mustart) && !length(etastart))
- mustart = cbind(rep(phi.init, len = n),
- mustart) # 1st coln not a real mu
+ mustart <- cbind(rep(phi.init, len = n),
+ mustart) # 1st coln not a real mu
}), list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob,
.ipstr0 = ipstr0,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- phi = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
- (1 - phi) * mubin
+ pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+ (1 - pstr0) * mubin
}, list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob ))),
last = eval(substitute(expression({
- misc$link = c("pstr0" = .lpstr0 , "prob" = .lprob )
- misc$earg = list("pstr0" = .epstr0 , "prob" = .eprob )
- misc$imethod = .imethod
+ misc$link <- c("pstr0" = .lpstr0 , "prob" = .lprob )
+
+ misc$earg <- list("pstr0" = .epstr0 , "prob" = .eprob )
+
+ misc$imethod <- .imethod
if (intercept.only && all(w == w[1])) {
- phi = eta2theta(eta[1, 1], .lpstr0 , earg = .epstr0 )
- mubin = eta2theta(eta[1, 2], .lprob , earg = .eprob )
- misc$pobs0 = phi + (1-phi) * (1-mubin)^w[1] # P(Y=0)
+ phi <- eta2theta(eta[1, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[1, 2], .lprob , earg = .eprob )
+ misc$pobs0 <- phi + (1 - phi) * (1 - mubin)^w[1] # P(Y=0)
}
}), list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob,
@@ -1443,8 +1518,8 @@ dposnegbin = function(x, munb, size, log = FALSE) {
.epstr0 = epstr0, .eprob = eprob ))),
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- pstr0 = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
+ pstr0 <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(dzibinom(x = round(w * y), size = w, prob = mubin,
@@ -1454,54 +1529,62 @@ dposnegbin = function(x, munb, size, log = FALSE) {
.epstr0 = epstr0, .eprob = eprob ))),
vfamily = c("zibinomial"),
deriv = eval(substitute(expression({
- phi = eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
- mubin = eta2theta(eta[, 2], .lprob , earg = .eprob )
-
- prob0 = (1 - mubin)^w # Actually q^w
- tmp8 = phi + (1 - phi) * prob0
- index = (y == 0)
- dl.dphi = (1 - prob0) / tmp8
- dl.dphi[!index] = -1 / (1 - phi[!index])
- dl.dmubin = -w * (1 - phi) * (1 - mubin)^(w - 1) / tmp8
- dl.dmubin[!index] = w[!index] *
- (y[!index] / mubin[!index] -
+ phi <- eta2theta(eta[, 1], .lpstr0 , earg = .epstr0 )
+ mubin <- eta2theta(eta[, 2], .lprob , earg = .eprob )
+
+ prob0 <- (1 - mubin)^w # Actually q^w
+ tmp8 <- phi + (1 - phi) * prob0
+ index <- (y == 0)
+ dl.dphi <- (1 - prob0) / tmp8
+ dl.dphi[!index] <- -1 / (1 - phi[!index])
+
+ dl.dmubin <- -w * (1 - phi) * (1 - mubin)^(w - 1) / tmp8
+ dl.dmubin[!index] <- w[!index] *
+ ( y[!index] / mubin[!index] -
(1 - y[!index]) / (1 - mubin[!index]))
- dphi.deta = dtheta.deta(phi, .lpstr0 , earg = .epstr0 )
- dmubin.deta = dtheta.deta(mubin, .lprob , earg = .eprob )
- ans = cbind(dl.dphi * dphi.deta,
- dl.dmubin * dmubin.deta)
+
+ dphi.deta <- dtheta.deta(phi, .lpstr0 , earg = .epstr0 )
+ dmubin.deta <- dtheta.deta(mubin, .lprob , earg = .eprob )
+
+ ans <- cbind(dl.dphi * dphi.deta,
+ dl.dmubin * dmubin.deta)
+
if ( .lprob == "logit") {
- ans[!index,2] = w[!index] * (y[!index] - mubin[!index])
+ ans[!index, 2] <- w[!index] * (y[!index] - mubin[!index])
}
+
ans
}), list( .lpstr0 = lpstr0, .lprob = lprob,
.epstr0 = epstr0, .eprob = eprob ))),
weight = eval(substitute(expression({
- wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+ wz <- matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
+
+ ned2l.dphi2 <- (1 - prob0) / ((1 - phi) * tmp8)
- d2l.dphi2 = (1 - prob0) / ((1 - phi) * tmp8)
+ ned2l.dphimubin <- -w * ((1 - mubin)^(w - 1)) / tmp8
- d2l.dphimubin = -w * (1 - mubin)^(w - 1) / tmp8
- d2l.dmubin2 = w * (1 - phi) *
- (1 / (mubin * (1 - mubin)) -
- (tmp8 * (w - 1) * (1 - mubin)^(w - 2) -
- (1 - phi) * w * (1 - mubin)^(2*(w - 1))) / tmp8)
+ ned2l.dmubin2 <- (w * (1 - phi) / (mubin * (1 - mubin)^2)) *
+ (1 - mubin - w * mubin * (1 - mubin)^w * phi / tmp8)
- wz[,iam(1,1,M)] = d2l.dphi2 * dphi.deta^2
- wz[,iam(2,2,M)] = d2l.dmubin2 * dmubin.deta^2
- wz[,iam(1,2,M)] = d2l.dphimubin * dphi.deta * dmubin.deta
+
+
+
+
+ wz[,iam(1, 1, M)] <- ned2l.dphi2 * dphi.deta^2
+ wz[,iam(2, 2, M)] <- ned2l.dmubin2 * dmubin.deta^2
+ wz[,iam(1, 2, M)] <- ned2l.dphimubin * dphi.deta * dmubin.deta
if (TRUE) {
- ind6 = (wz[,iam(2,2,M)] < .Machine$double.eps)
+ ind6 <- (wz[, iam(2, 2, M)] < .Machine$double.eps)
if (any(ind6))
- wz[ind6,iam(2,2,M)] = .Machine$double.eps
+ wz[ind6, iam(2, 2, M)] <- .Machine$double.eps
}
wz
}), list( .lpstr0 = lpstr0, .lprob = lprob,
@@ -1517,8 +1600,8 @@ dposnegbin = function(x, munb, size, log = FALSE) {
-dzibinom = function(x, size, prob, pstr0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dzibinom <- function(x, size, prob, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1549,7 +1632,7 @@ dzibinom = function(x, size, prob, pstr0 = 0, log = FALSE) {
}
-pzibinom = function(q, size, prob, pstr0 = 0,
+pzibinom <- function(q, size, prob, pstr0 = 0,
lower.tail = TRUE, log.p = FALSE) {
LLL = max(length(pstr0), length(size), length(prob), length(q))
@@ -1571,7 +1654,7 @@ pzibinom = function(q, size, prob, pstr0 = 0,
}
-qzibinom = function(p, size, prob, pstr0 = 0,
+qzibinom <- function(p, size, prob, pstr0 = 0,
lower.tail = TRUE, log.p = FALSE) {
LLL = max(length(p), length(size), length(prob), length(pstr0))
p = rep(p, length = LLL)
@@ -1613,7 +1696,7 @@ qzibinom = function(p, size, prob, pstr0 = 0,
}
-rzibinom = function(n, size, prob, pstr0 = 0) {
+rzibinom <- function(n, size, prob, pstr0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -1655,7 +1738,7 @@ rzibinom = function(n, size, prob, pstr0 = 0) {
-dzinegbin = function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
+dzinegbin <- function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
log = FALSE) {
if (length(munb)) {
if (length(prob))
@@ -1663,7 +1746,7 @@ dzinegbin = function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
prob <- size / (size + munb)
}
- if (!is.logical(log.arg <- log))
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -1693,7 +1776,7 @@ dzinegbin = function(x, size, prob = NULL, munb = NULL, pstr0 = 0,
}
-pzinegbin = function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
+pzinegbin <- function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -1723,7 +1806,7 @@ pzinegbin = function(q, size, prob = NULL, munb = NULL, pstr0 = 0) {
}
-qzinegbin = function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
+qzinegbin <- function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -1766,7 +1849,7 @@ qzinegbin = function(p, size, prob = NULL, munb = NULL, pstr0 = 0) {
}
-rzinegbin = function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
+rzinegbin <- function(n, size, prob = NULL, munb = NULL, pstr0 = 0) {
if (length(munb)) {
if (length(prob))
stop("arguments 'prob' and 'munb' both specified")
@@ -1817,21 +1900,30 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}
- zinegbinomial =
+ zinegbinomial <-
function(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
- epstr0 = list(), emunb = list(), esize = list(),
ipstr0 = NULL, isize = NULL,
zero = c(-1, -3),
imethod = 1, shrinkage.init = 0.95,
nsimEIM = 250)
{
- if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
- lpstr0 = as.character(substitute(lpstr0))
- if (mode(lmunb) != "character" && mode(lmunb) != "name")
- lmunb = as.character(substitute(lmunb))
- if (mode(lsize) != "character" && mode(lsize) != "name")
- lsize = as.character(substitute(lsize))
+
+
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr0 <- link2list(lpstr0)
+ lpstr0 <- attr(epstr0, "function.name")
+
+ lmunb <- as.list(substitute(lmunb))
+ emunb <- link2list(lmunb)
+ lmunb <- attr(emunb, "function.name")
+
+ lsize <- as.list(substitute(lsize))
+ esize <- link2list(lsize)
+ lsize <- attr(esize, "function.name")
+
+
+
if (length(ipstr0) &&
@@ -1856,9 +1948,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
- if (!is.list(epstr0)) epstr0 = list()
- if (!is.list(emunb)) emunb = list()
- if (!is.list(esize)) esize = list()
+
+
new("vglmff",
blurb = c("Zero-inflated negative binomial\n\n",
@@ -1875,7 +1966,21 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 3
- y <- as.matrix(y)
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
extra$NOS = NOS = ncoly = ncol(y) # Number of species
if (length(dimnames(y)))
extra$dimnamesy2 = dimnames(y)[[2]]
@@ -1883,7 +1988,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
mynames1 = if (NOS == 1) "pstr0" else paste("pstr0", 1:NOS, sep = "")
mynames2 = if (NOS == 1) "munb" else paste("munb", 1:NOS, sep = "")
mynames3 = if (NOS == 1) "size" else paste("size", 1:NOS, sep = "")
- predictors.names =
+ predictors.names <-
c(namesof(mynames1, .lpstr0 , earg = .epstr0 , tag = FALSE),
namesof(mynames2, .lmunb , earg = .emunb , tag = FALSE),
namesof(mynames3, .lsize , earg = .esize , tag = FALSE))[
@@ -1897,8 +2002,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
for(iii in 1:ncol(y)) {
index = (y[, iii] > 0)
mum.init[, iii] = if ( .imethod == 2)
- weighted.mean(y[index, iii], w = w[index]) else
- median(rep(y[index, iii], times = w[index])) + 1/8
+ weighted.mean(y[index, iii], w = w[index, iii]) else
+ median(rep(y[index, iii], times = w[index, iii])) + 1/8
}
(1 - .sinit) * (y + 1/16) + .sinit * mum.init
}
@@ -1909,7 +2014,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
} else {
pstr0.init = y
for(iii in 1:ncol(y))
- pstr0.init[, iii] = sum(w[y[, iii] == 0]) / sum(w)
+ pstr0.init[, iii] = sum(w[y[, iii] == 0, iii]) / sum(w[, iii])
pstr0.init[pstr0.init <= 0.02] = 0.02 # Last resort
pstr0.init[pstr0.init >= 0.98] = 0.98 # Last resort
pstr0.init
@@ -1919,7 +2024,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
if ( is.Numeric( .isize )) {
matrix( .isize, nrow = n, ncol = ncoly, byrow = TRUE)
} else {
- zinegbin.Loglikfun = function(kval, y, x, w, extraargs) {
+ zinegbin.Loglikfun <- function(kval, y, x, w, extraargs) {
index0 = (y == 0)
pstr0vec = extraargs$pstr0
muvec = extraargs$mu
@@ -1943,7 +2048,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
for(spp. in 1:NOS) {
kay.init[, spp.] = getMaxMin(k.grid,
objfun = zinegbin.Loglikfun,
- y = y[, spp.], x = x, w = w,
+ y = y[, spp.], x = x, w = w[, spp.],
extraargs = list(pstr0 = pstr0.init[, spp.],
mu = mum.init[, spp.]))
}
@@ -1962,15 +2067,15 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
.sinit = shrinkage.init,
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- Musual = 3
- NOS = extra$NOS
- pstr0 = eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
- .lpstr0 , earg = .epstr0 )
- munb = eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
- .lmunb , earg = .emunb )
- fv.matrix = (1 - pstr0) * munb
+ Musual <- 3
+ NOS <- extra$NOS
+ pstr0 <- eta2theta(eta[, Musual*(1:NOS)-2, drop = FALSE],
+ .lpstr0 , earg = .epstr0 )
+ munb <- eta2theta(eta[, Musual*(1:NOS)-1, drop = FALSE],
+ .lmunb , earg = .emunb )
+ fv.matrix <- (1 - pstr0) * munb
if (length(extra$dimnamesy2))
- dimnames(fv.matrix) = list(dimnames(pstr0)[[1]], extra$dimnamesy2)
+ dimnames(fv.matrix) <- list(dimnames(pstr0)[[1]], extra$dimnamesy2)
fv.matrix
}, list( .lpstr0 = lpstr0, .lsize = lsize, .lmunb = lmunb,
.epstr0 = epstr0, .esize = esize, .emunb = emunb ))),
@@ -2000,10 +2105,12 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
misc$Musual = Musual
misc$ipstr0 = .ipstr0
misc$isize = .isize
+ misc$multipleResponses <- TRUE
+
if (intercept.only) {
- pstr0.val = eta2theta(eta[1,Musual*(1:NOS)-2], .lpstr0 , earg= .epstr0 )
- munb.val = eta2theta(eta[1,Musual*(1:NOS)-1], .lmunb , earg= .emunb )
- kval = eta2theta(eta[1,Musual*(1:NOS) ], .lsize , earg= .esize )
+ pstr0.val = eta2theta(eta[1, Musual*(1:NOS)-2], .lpstr0 , earg= .epstr0 )
+ munb.val = eta2theta(eta[1, Musual*(1:NOS)-1], .lmunb , earg= .emunb )
+ kval = eta2theta(eta[1, Musual*(1:NOS) ], .lsize , earg= .esize )
misc$pobs0 = pstr0.val +
(1 - pstr0.val) * (kval / (kval + munb.val))^kval # P(Y=0)
}
@@ -2023,7 +2130,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
.lsize , earg = .esize )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzinegbin(x = y, size = kmat, munb = munb,
+ sum(c(w) * dzinegbin(x = y, size = kmat, munb = munb,
pstr0 = pstr0, log = TRUE))
}
}, list( .lpstr0 = lpstr0, .lmunb = lmunb, .lsize = lsize,
@@ -2180,7 +2287,9 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}
}
}
- c(w) * wz
+
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = M / Musual)
}), list( .lpstr0 = lpstr0,
.epstr0 = epstr0, .nsimEIM = nsimEIM ))))
} # End of zinegbinomial
@@ -2193,18 +2302,23 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
zipoissonff <- function(llambda = "loge", lprobp = "logit",
- elambda = list(), eprobp = list(),
ilambda = NULL, iprobp = NULL, imethod = 1,
shrinkage.init = 0.8, zero = -2)
{
lprobp. <- lprobp
- eprobp. <- eprobp
iprobp. <- iprobp
- if (mode(llambda) != "character" && mode(llambda) != "name")
- llambda <- as.character(substitute(llambda))
- if (mode(lprobp.) != "character" && mode(lprobp.) != "name")
- lprobp. <- as.character(substitute(lprobp.))
+
+
+ llambda <- as.list(substitute(llambda))
+ elambda <- link2list(llambda)
+ llambda <- attr(elambda, "function.name")
+
+ lprobp <- as.list(substitute(lprobp))
+ eprobp. <- link2list(lprobp)
+ lprobp. <- attr(eprobp., "function.name")
+
+
if (length(ilambda))
if (!is.Numeric(ilambda, positive = TRUE))
@@ -2214,8 +2328,6 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
any(iprobp. >= 1))
stop("'iprobp' values must be inside the interval (0,1)")
- if (!is.list(elambda)) elambda <- list()
- if (!is.list(eprobp.)) eprobp. <- list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -2227,6 +2339,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
shrinkage.init > 1)
stop("bad input for argument 'shrinkage.init'")
+
+
new("vglmff",
blurb = c("Zero-inflated Poisson\n\n",
"Links: ",
@@ -2240,10 +2354,24 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
- y <- as.matrix(y)
+
+
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = Inf,
+ ncol.y.max = Inf,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
ncoly <- ncol(y)
Musual <- 2
@@ -2251,8 +2379,6 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
extra$Musual <- Musual
M <- Musual * ncoly
- if (any(round(y) != y))
- stop("responses must be integer-valued")
mynames1 <- paste("lambda", if (ncoly > 1) 1:ncoly else "", sep = "")
mynames2 <- paste("probp", if (ncoly > 1) 1:ncoly else "", sep = "")
@@ -2288,7 +2414,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
}
zipois.Loglikfun <- function(phival, y, x, w, extraargs) {
- sum(w * dzipois(x = y, pstr0 = phival,
+ sum(c(w) * dzipois(x = y, pstr0 = phival,
lambda = extraargs$lambda,
log = TRUE))
}
@@ -2339,13 +2465,14 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
misc$earg <- vector("list", Musual * ncoly)
names(misc$earg) <- temp.names
for(ii in 1:ncoly) {
- misc$earg[[Musual*ii-1]] <- .elambda
- misc$earg[[Musual*ii ]] <- .eprobp.
+ misc$earg[[Musual*ii-1]] <- .elambda
+ misc$earg[[Musual*ii ]] <- .eprobp.
}
misc$Musual <- Musual
misc$imethod <- .imethod
misc$expected = TRUE
+ misc$multipleResponses <- TRUE
misc$pobs0 <- (1 - probp.) + probp. * exp(-lambda) # P(Y=0)
misc$pobs0 <- as.matrix(misc$pobs0)
@@ -2366,7 +2493,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzipois(x = y, pstr0 = 1 - probp., lambda = lambda,
+ sum(c(w) * dzipois(x = y, pstr0 = 1 - probp., lambda = lambda,
log = TRUE))
}
}, list( .lprobp. = lprobp., .llambda = llambda,
@@ -2414,29 +2541,29 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
wz <- matrix(0, nrow = n, ncol = M + M-1)
- d2l.dlambda2 <- ( probp.) / lambda -
+ ned2l.dlambda2 <- ( probp.) / lambda -
probp. * (1 - probp.) * exp(-lambda) / denom
- d2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * denom)
- d2l.dphilambda <- +exp(-lambda) / denom
+ ned2l.dprobp.2 <- -expm1(-lambda) / (( probp.) * denom)
+ ned2l.dphilambda <- +exp(-lambda) / denom
if (ncoly == 1) { # Make sure these are matrices
- d2l.dlambda2 <- cbind(d2l.dlambda2)
- d2l.dprobp.2 <- cbind(d2l.dprobp.2)
+ ned2l.dlambda2 <- cbind(ned2l.dlambda2)
+ ned2l.dprobp.2 <- cbind(ned2l.dprobp.2)
dlambda.deta <- cbind(dlambda.deta)
dprobp..deta <- cbind(dprobp..deta)
- d2l.dphilambda <- cbind(d2l.dphilambda)
+ ned2l.dphilambda <- cbind(ned2l.dphilambda)
}
for (ii in 1:ncoly) {
wz[, iam(Musual*ii - 1, Musual*ii - 1, M)] <-
- d2l.dlambda2[, ii] *
+ ned2l.dlambda2[, ii] *
dlambda.deta[, ii]^2
wz[, iam(Musual*ii , Musual*ii , M)] <-
- d2l.dprobp.2[, ii] *
+ ned2l.dprobp.2[, ii] *
dprobp..deta[, ii]^2
wz[, iam(Musual*ii - 1, Musual*ii , M)] <-
- d2l.dphilambda[, ii] *
+ ned2l.dphilambda[, ii] *
dprobp..deta[, ii] *
dlambda.deta[, ii]
@@ -2445,7 +2572,7 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
} # ii
- c(w) * wz
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = ncoly)
}), list( .llambda = llambda ))))
}
@@ -2455,8 +2582,8 @@ zinegbinomial.control <- function(save.weight = TRUE, ...)
-dzigeom = function(x, prob, pstr0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dzigeom <- function(x, prob, pstr0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -2489,7 +2616,7 @@ dzigeom = function(x, prob, pstr0 = 0, log = FALSE) {
-pzigeom = function(q, prob, pstr0 = 0) {
+pzigeom <- function(q, prob, pstr0 = 0) {
LLL = max(length(q), length(prob), length(pstr0))
@@ -2511,7 +2638,7 @@ pzigeom = function(q, prob, pstr0 = 0) {
-qzigeom = function(p, prob, pstr0 = 0) {
+qzigeom <- function(p, prob, pstr0 = 0) {
LLL = max(length(p), length(prob), length(pstr0))
ans = p = rep(p, len = LLL)
prob = rep(prob, len = LLL)
@@ -2543,7 +2670,7 @@ qzigeom = function(p, prob, pstr0 = 0) {
-rzigeom = function(n, prob, pstr0 = 0) {
+rzigeom <- function(n, prob, pstr0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -2578,24 +2705,28 @@ rzigeom = function(n, prob, pstr0 = 0) {
- zigeometric = function(lprob = "logit", eprob = list(),
- lpstr0 = "logit", epstr0 = list(),
- iprob = NULL, ipstr0 = NULL,
- imethod = 1,
- bias.red = 0.5,
- zero = 2)
+ zigeometric <- function(lprob = "logit",
+ lpstr0 = "logit",
+ iprob = NULL, ipstr0 = NULL,
+ imethod = 1,
+ bias.red = 0.5,
+ zero = 2)
{
expected = TRUE
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- if (mode(lpstr0) != "character" && mode(lpstr0) != "name")
- lpstr0 = as.character(substitute(lpstr0))
- if (!is.list(eprob)) eprob = list()
- if (!is.list(epstr0)) epstr0 = list()
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
+ lpstr0 <- as.list(substitute(lpstr0))
+ epstr0 <- link2list(lpstr0)
+ lpstr0 <- attr(epstr0, "function.name")
+
+
if (length(iprob))
@@ -2628,11 +2759,11 @@ rzigeom = function(n, prob, pstr0 = 0) {
namesof("pstr0", lpstr0, earg = epstr0), "\n",
"Mean: (1 - pstr0) * (1 - prob) / prob"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
if (ncol(cbind(y)) != 1)
@@ -2640,12 +2771,20 @@ rzigeom = function(n, prob, pstr0 = 0) {
if (any(y < 0))
stop("all responses must be >= 0")
- if (any(y != round(y)))
- stop("response should be integer-valued")
- predictors.names =
- c(namesof("prob", .lprob, earg = .earg, tag = FALSE),
- namesof("pstr0", .lpstr0, earg = .epstr0, tag = FALSE))
+
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ Is.integer.y = TRUE)
+
+
+
+
+
+ predictors.names <-
+ c(namesof("prob", .lprob, earg = .eprob, tag = FALSE),
+ namesof("pstr0", .lpstr0, earg = .epstr0, tag = FALSE))
if (!length(etastart)) {
prob.init = if ( .imethod == 3)
@@ -2713,7 +2852,7 @@ rzigeom = function(n, prob, pstr0 = 0) {
pstr0 = eta2theta(eta[, 2], .lpstr0 , earg = .epstr0 )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
- sum(w * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
+ sum(c(w) * dzigeom(x = y, prob = prob, pstr0 = pstr0, log = TRUE))
}
}, list( .lprob = lprob, .lpstr0 = lpstr0,
.eprob = eprob, .epstr0 = epstr0 ))),
@@ -2763,17 +2902,18 @@ rzigeom = function(n, prob, pstr0 = 0) {
wz = matrix(as.numeric(NA), nrow = n, ncol = dimm(M))
if ( .expected ) {
- wz[,iam(1,1,M)] = ed2l.dprob2 * dprob.deta^2
- wz[,iam(2,2,M)] = ed2l.dpstr02 * dpstr0.deta^2
- wz[,iam(1,2,M)] = ed2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ wz[,iam(1, 1, M)] = ed2l.dprob2 * dprob.deta^2
+ wz[,iam(2, 2, M)] = ed2l.dpstr02 * dpstr0.deta^2
+ wz[,iam(1, 2, M)] = ed2l.dpstr0.prob * dprob.deta * dpstr0.deta
} else {
- wz[,iam(1,1,M)] = od2l.dprob2 * dprob.deta^2
- wz[,iam(2,2,M)] = od2l.dpstr02 * dpstr0.deta^2
- wz[,iam(1,2,M)] = od2l.dpstr0.prob * dprob.deta * dpstr0.deta
+ wz[,iam(1, 1, M)] = od2l.dprob2 * dprob.deta^2
+ wz[,iam(2, 2, M)] = od2l.dpstr02 * dpstr0.deta^2
+ wz[,iam(1, 2, M)] = od2l.dpstr0.prob * dprob.deta * dpstr0.deta
}
- c(w) * wz
+
+ w.wz.merge(w = w, wz = wz, n = n, M = M, ndepy = 1)
}), list( .lprob = lprob, .lpstr0 = lpstr0,
.expected = expected,
.eprob = eprob, .epstr0 = epstr0 ))))
@@ -2784,8 +2924,8 @@ rzigeom = function(n, prob, pstr0 = 0) {
-dzageom = function(x, prob, pobs0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dzageom <- function(x, prob, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -2814,7 +2954,7 @@ dzageom = function(x, prob, pobs0 = 0, log = FALSE) {
-pzageom = function(q, prob, pobs0 = 0) {
+pzageom <- function(q, prob, pobs0 = 0) {
LLL = max(length(q), length(prob), length(pobs0))
if (length(q) != LLL) q = rep(q, len = LLL);
@@ -2833,7 +2973,7 @@ pzageom = function(q, prob, pobs0 = 0) {
}
-qzageom = function(p, prob, pobs0 = 0) {
+qzageom <- function(p, prob, pobs0 = 0) {
LLL = max(length(p), length(prob), length(pobs0))
if (length(p) != LLL) p = rep(p, len = LLL);
@@ -2852,7 +2992,7 @@ qzageom = function(p, prob, pobs0 = 0) {
}
-rzageom = function(n, prob, pobs0 = 0) {
+rzageom <- function(n, prob, pobs0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -2874,8 +3014,8 @@ rzageom = function(n, prob, pobs0 = 0) {
-dzabinom = function(x, size, prob, pobs0 = 0, log = FALSE) {
- if (!is.logical(log.arg <- log))
+dzabinom <- function(x, size, prob, pobs0 = 0, log = FALSE) {
+ if (!is.logical(log.arg <- log) || length(log) != 1)
stop("bad input for argument 'log'")
rm(log)
@@ -2905,7 +3045,7 @@ dzabinom = function(x, size, prob, pobs0 = 0, log = FALSE) {
-pzabinom = function(q, size, prob, pobs0 = 0) {
+pzabinom <- function(q, size, prob, pobs0 = 0) {
LLL = max(length(q), length(size), length(prob), length(pobs0))
if (length(q) != LLL) q = rep(q, len = LLL);
@@ -2926,7 +3066,7 @@ pzabinom = function(q, size, prob, pobs0 = 0) {
}
-qzabinom = function(p, size, prob, pobs0 = 0) {
+qzabinom <- function(p, size, prob, pobs0 = 0) {
LLL = max(length(p), length(size), length(prob), length(pobs0))
if (length(p) != LLL) p = rep(p, len = LLL);
@@ -2947,7 +3087,7 @@ qzabinom = function(p, size, prob, pobs0 = 0) {
}
-rzabinom = function(n, size, prob, pobs0 = 0) {
+rzabinom <- function(n, size, prob, pobs0 = 0) {
use.n = if ((length.n <- length(n)) > 1) length.n else
if (!is.Numeric(n, integer.valued = TRUE,
allowable.length = 1, positive = TRUE))
@@ -2964,23 +3104,28 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
- zabinomial = function(lprob = "logit", eprob = list(),
- lpobs0 = "logit", epobs0 = list(),
- iprob = NULL, ipobs0 = NULL,
- imethod = 1,
- zero = 2)
+ zabinomial <- function(lprob = "logit",
+ lpobs0 = "logit",
+ iprob = NULL, ipobs0 = NULL,
+ imethod = 1,
+ zero = 2)
{
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
- lpobs0 = as.character(substitute(lpobs0))
- if (!is.list(eprob)) eprob = list()
- if (!is.list(epobs0)) epobs0 = list()
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
+
+ lpobs0 <- as.list(substitute(lpobs0))
+ epobs0 <- link2list(lpobs0)
+ lpobs0 <- attr(epobs0, "function.name")
+
+
+
if (length(iprob))
if (!is.Numeric(iprob, positive = TRUE) ||
@@ -3010,15 +3155,15 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
namesof("pobs0", lpobs0, earg = epobs0), "\n",
"Mean: (1 - pobs0) * prob / (1 - (1 - prob)^size)"),
constraints = eval(substitute(expression({
- constraints <- cm.zero.vgam(constraints, x, .zero, M)
+ constraints <- cm.zero.vgam(constraints, x, .zero , M)
}), list( .zero = zero ))),
infos = eval(substitute(function(...) {
list(Musual = 2,
- zero = .zero)
+ zero = .zero )
}, list( .zero = zero ))),
initialize = eval(substitute(expression({
if (!all(w == 1))
- extra$orig.w = w
+ extra$orig.w <- w
@@ -3074,14 +3219,14 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
- predictors.names =
+ predictors.names <-
c(namesof("prob" , .lprob , earg = .eprob , tag = FALSE),
namesof("pobs0", .lpobs0 , earg = .epobs0 , tag = FALSE))
- orig.w = if (length(extra$orig.w)) extra$orig.w else 1
- new.w = if (length(extra$new.w)) extra$new.w else 1
- Size = new.w / orig.w
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
phi.init = if (length( .ipobs0 )) .ipobs0 else {
prob0.est = sum(Size[y == 0]) / sum(Size)
@@ -3115,21 +3260,21 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
.imethod = imethod ))),
linkinv = eval(substitute(function(eta, extra = NULL) {
- prob = eta2theta(eta[, 1], .lprob, earg = .eprob )
- phi0 = eta2theta(eta[, 2], .lpobs0, earg = .epobs0 )
- orig.w = if (length(extra$orig.w)) extra$orig.w else 1
- new.w = if (length(extra$new.w)) extra$new.w else 1
- Size = new.w / orig.w
+ prob <- eta2theta(eta[, 1], .lprob, earg = .eprob )
+ phi0 <- eta2theta(eta[, 2], .lpobs0, earg = .epobs0 )
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
(1 - phi0) * prob / (1 - (1 - prob)^Size)
}, list( .lprob = lprob, .lpobs0 = lpobs0,
.eprob = eprob, .epobs0 = epobs0 ))),
last = eval(substitute(expression({
- misc$link = c(prob = .lprob, pobs0 = .lpobs0 )
- misc$earg = list(prob = .eprob, pobs0 = .epobs0 )
- misc$imethod = .imethod
- misc$zero = .zero
- misc$expected = TRUE
+ misc$link <- c(prob = .lprob, pobs0 = .lpobs0 )
+ misc$earg <- list(prob = .eprob, pobs0 = .epobs0 )
+ misc$imethod <- .imethod
+ misc$zero <- .zero
+ misc$expected <- TRUE
}), list( .lprob = lprob, .lpobs0 = lpobs0,
.eprob = eprob, .epobs0 = epobs0,
.zero = zero,
@@ -3137,11 +3282,11 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
loglikelihood = eval(substitute(
function(mu, y, w, residuals = FALSE, eta, extra = NULL) {
- orig.w = if (length(extra$orig.w)) extra$orig.w else 1
- new.w = if (length(extra$new.w)) extra$new.w else 1
- Size = new.w / orig.w
- prob = eta2theta(eta[, 1], .lprob , earg = .eprob )
- pobs0 = eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ pobs0 <- eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
if (residuals) stop("loglikelihood residuals not ",
"implemented yet") else {
sum(orig.w * dzabinom(x = round(y * Size), size = Size,
@@ -3153,18 +3298,18 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
vfamily = c("zabinomial"),
deriv = eval(substitute(expression({
- NOS = if (length(extra$NOS)) extra$NOS else 1
- Musual = 2
+ NOS <- if (length(extra$NOS)) extra$NOS else 1
+ Musual <- 2
- orig.w = if (length(extra$orig.w)) extra$orig.w else 1
- new.w = if (length(extra$new.w)) extra$new.w else 1
- Size = new.w / orig.w
+ orig.w <- if (length(extra$orig.w)) extra$orig.w else 1
+ new.w <- if (length(extra$new.w)) extra$new.w else 1
+ Size <- new.w / orig.w
- prob = eta2theta(eta[, 1], .lprob , earg = .eprob )
- phi0 = eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
+ prob <- eta2theta(eta[, 1], .lprob , earg = .eprob )
+ phi0 <- eta2theta(eta[, 2], .lpobs0 , earg = .epobs0 )
- dprob.deta = dtheta.deta(prob, .lprob , earg = .eprob )
- dphi0.deta = dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
+ dprob.deta <- dtheta.deta(prob, .lprob , earg = .eprob )
+ dphi0.deta <- dtheta.deta(phi0, .lpobs0, earg = .epobs0 )
df0.dprob = -Size * (1 - prob)^(Size - 1)
df02.dprob2 = Size * (Size - 1) * (1 - prob)^(Size - 2)
@@ -3206,7 +3351,7 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
term2 = -(1 - phi0) * df02.dprob2 / oneminusf0
term3 = -(1 - phi0) * (df0.dprob / oneminusf0)^2
ed2l.dprob2 = term1 + term2 + term3
- wz[, iam(1,1,M)] = ed2l.dprob2 * dprob.deta^2
+ wz[, iam(1, 1, M)] = ed2l.dprob2 * dprob.deta^2
mu.phi0 = phi0
@@ -3216,7 +3361,8 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
} else {
(dphi0.deta^2) / tmp100
}
- wz[, iam(2,2,M)] = tmp200
+ wz[, iam(2, 2, M)] = tmp200
+
c(orig.w) * wz
}), list( .lprob = lprob, .lpobs0 = lpobs0,
@@ -3227,20 +3373,23 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
- zageometric = function(lpobs0 = "logit", lprob = "logit",
- epobs0 = list(), eprob = list(),
- imethod = 1,
- ipobs0 = NULL, iprob = NULL,
- zero = NULL) {
+ zageometric <- function(lpobs0 = "logit", lprob = "logit",
+ imethod = 1,
+ ipobs0 = NULL, iprob = NULL,
+ zero = NULL) {
+
+
+
+ lpobs0 <- as.list(substitute(lpobs0))
+ epobs0 <- link2list(lpobs0)
+ lpobs0 <- attr(epobs0, "function.name")
+
+ lprob <- as.list(substitute(lprob))
+ eprob <- link2list(lprob)
+ lprob <- attr(eprob, "function.name")
- if (mode(lpobs0) != "character" && mode(lpobs0) != "name")
- lpobs0 = as.character(substitute(lpobs0))
- if (mode(lprob) != "character" && mode(lprob) != "name")
- lprob = as.character(substitute(lprob))
- if (!is.list(epobs0)) epobs0 = list()
- if (!is.list(eprob)) eprob = list()
if (!is.Numeric(imethod, allowable.length = 1,
integer.valued = TRUE, positive = TRUE) ||
@@ -3272,26 +3421,39 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
}), list( .zero = zero ))),
initialize = eval(substitute(expression({
Musual <- 2
- y <- as.matrix(y)
- if (any(y != round(y )))
- stop("the response must be integer-valued")
if (any(y < 0))
stop("the response must not have negative values")
+ temp5 <-
+ w.y.check(w = w, y = y,
+ ncol.w.max = 1,
+ ncol.y.max = 1,
+ Is.integer.y = TRUE,
+ out.wy = TRUE,
+ colsyperw = 1,
+ maximize = TRUE)
+ w <- temp5$w
+ y <- temp5$y
+
+
+
+
extra$y0 = y0 = ifelse(y == 0, 1, 0)
extra$NOS = NOS = ncoly = ncol(y) # Number of species
extra$skip.these = skip.these = matrix(as.logical(y0), n, NOS)
- mynames1 = if (ncoly == 1) "pobs0" else paste("pobs0", 1:ncoly, sep = "")
- mynames2 = if (ncoly == 1) "prob" else paste("prob", 1:ncoly, sep = "")
- predictors.names =
+ mynames1 <- if (ncoly == 1) "pobs0" else
+ paste("pobs0", 1:ncoly, sep = "")
+ mynames2 <- if (ncoly == 1) "prob" else
+ paste("prob", 1:ncoly, sep = "")
+ predictors.names <-
c(namesof(mynames1, .lpobs0 , earg = .epobs0 , tag = FALSE),
namesof(mynames2, .lprob , earg = .eprob , tag = FALSE))[
interleave.VGAM(Musual*NOS, M = Musual)]
if (!length(etastart)) {
- foo = function(x) mean(as.numeric(x == 0))
+ foo <- function(x) mean(as.numeric(x == 0))
phi0.init = matrix(apply(y, 2, foo), n, ncoly, byrow = TRUE)
if (length( .ipobs0 ))
phi0.init = matrix( .ipobs0 , n, ncoly, byrow = TRUE)
@@ -3311,7 +3473,7 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
etastart = cbind(theta2eta(phi0.init, .lpobs0 , earg = .epobs0 ),
- theta2eta(prob.init, .lprob , earg = .eprob ))
+ theta2eta(prob.init, .lprob , earg = .eprob ))
etastart = etastart[, interleave.VGAM(ncol(etastart), M = Musual)]
}
}), list( .lpobs0 = lpobs0, .lprob = lprob,
@@ -3335,11 +3497,13 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
rep( .lprob , len = NOS))
temp.names = temp.names[interleave.VGAM(Musual*NOS, M = Musual)]
misc$link = temp.names
+
misc$expected = TRUE
misc$earg = vector("list", Musual * NOS)
misc$imethod = .imethod
misc$ipobs0 = .ipobs0
misc$iprob = .iprob
+ misc$multipleResponses <- TRUE
names(misc$link) <-
names(misc$earg) <-
@@ -3365,7 +3529,7 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
if (residuals)
stop("loglikelihood residuals not implemented yet") else {
- sum(w * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE))
+ sum(c(w) * dzageom(x = y, pobs0 = phi0, prob = prob, log = TRUE))
}
}, list( .lpobs0 = lpobs0, .lprob = lprob,
.epobs0 = epobs0, .eprob = eprob ))),
@@ -3421,6 +3585,8 @@ rzabinom = function(n, size, prob, pobs0 = 0) {
wz = wz[, interleave.VGAM(ncol(wz), M = Musual)]
+
+
wz
}), list( .lpobs0 = lpobs0,
.epobs0 = epobs0 ))))
diff --git a/R/links.q b/R/links.q
index fabed11..6f30c4d 100644
--- a/R/links.q
+++ b/R/links.q
@@ -5,10 +5,11 @@
- ToString = function(x) paste(x, collapse = ", ")
+ToString <- function(x)
+ paste(x, collapse = ", ")
@@ -16,251 +17,356 @@
-
- TypicalVGAMfamilyFunction <- function(lsigma = "loge", esigma = list(),
- isigma = NULL, parallel = TRUE,
- shrinkage.init = 0.95,
- nointercept = NULL, imethod = 1,
- prob.x = c(0.15, 0.85),
- mv = FALSE, whitespace = FALSE,
- oim = FALSE, nsimEIM = 100,
- zero = NULL) {
+ TypicalVGAMfamilyFunction <-
+ function(lsigma = "loge",
+ isigma = NULL, parallel = TRUE,
+ shrinkage.init = 0.95,
+ nointercept = NULL, imethod = 1,
+ probs.x = c(0.15, 0.85),
+ probs.y = c(0.25, 0.50, 0.75),
+ mv = FALSE, earg.link = FALSE,
+ whitespace = FALSE, bred = FALSE,
+ oim = FALSE, nsimEIM = 100,
+ zero = NULL) {
NULL
}
-TypicalVGAMlinkFunction <- function(theta,
- earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) {
- NULL
+
+TypicalVGAMlinkFunction <-
+ function(theta,
+ someParameter = 0,
+ bvalue = NULL, # .Machine$double.xmin is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ NULL
}
- namesof <- function(theta,
- link,
- earg = list(),
- tag = FALSE,
- short = TRUE) {
- string <- paste(link,
- "(theta = theta, earg = earg, short=short, tag=tag)", sep = "")
- calls <- parse(text=string)[[1]]
- ans <- eval(calls)
- return(ans)
-}
- theta2eta <- function(theta, link, earg = list()) {
- string <- paste(link, "(theta = theta, earg = earg)", sep = "")
- calls <- parse(text=string)[[1]]
- eval(calls)
-}
+ loge <- function(theta,
+ bvalue = NULL, # .Machine$double.xmin is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+{
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("log(", theta, ")", sep = "") else
+ paste("log(", theta, ")", sep = "")
+ if (tag)
+ string <- paste("Log:", string)
+ return(string)
+ }
+ if (!inverse && length(bvalue))
+ theta[theta <= 0.0] <- bvalue
- eta2theta <- function(theta, link = "identity", earg = list()) {
- if (is.null(link))
- link <- "identity"
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ exp(theta)
+ }
+ } else {
+ switch(deriv + 1, {
+ log(theta)},
+ theta,
+ theta)
+ }
+}
- llink <- length(link)
- if (llink == 1) {
- string <- paste(link, "(theta = theta, earg = earg, inverse = TRUE)", sep = "")
- calls <- parse(text=string)[[1]]
- return(eval(calls))
- } else
- if (llink > 1) {
- if (is.matrix(theta) && llink == ncol(theta)) {
+ logoff <- function(theta,
+ offset = 0,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (!is.Numeric(offset))
+ stop("bad input for argument 'offset'")
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("Logoff(", theta,
+ ", offset = ", as.character(offset),
+ ")", sep = "") else
+ paste("log(",
+ as.character(offset),
+ "+",
+ theta,
+ ")", sep = "")
+ if (tag)
+ string <- paste("Log with offset:", string)
+ return(string)
+ }
- ans <- NULL
- for(iii in 1:llink) {
- use.earg = if (is.list(earg) && length(earg)==llink &&
- is.list(earg[[iii]])) earg[[iii]] else earg
- string = paste(link[iii],
- "(theta = theta[,iii], earg=use.earg, inverse = TRUE)",
- sep = "")
- calls <- parse(text=string)[[1]]
- ans <- cbind(ans, eval(calls))
- }
- } else {
- if (length(theta) < llink)
- theta = rep(theta, len=llink)
-
- if (length(theta) != llink)
- stop("length of 'theta' and 'link' do not match")
-
- ans <- NULL
- for(iii in 1:llink) {
- string = paste(link[iii],
- "(theta = theta[iii], earg = earg, inverse = TRUE)",
- sep = "")
- calls <- parse(text=string)[[1]]
- ans <- c(ans, eval(calls))
- }
- }
- return(ans)
- } else
- stop("length(link) == 0 not allowed")
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ offset = offset,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ exp(theta) - offset
+ }
+ } else {
+ switch(deriv + 1,
+ log(theta + offset),
+ theta + offset,
+ theta + offset)
+ }
}
- dtheta.deta <- function(theta, link, earg = list()) {
- string <- paste(link, "(theta = theta, earg = earg, deriv = 1)", sep = "")
- calls <- parse(text=string)[[1]]
- eval(calls)
+ identity <- function(theta,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (is.character(theta)) {
+ string <- theta
+ if (tag)
+ string <- paste("Identity:", string)
+ return(string)
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ theta
+ }
+ } else {
+ switch(deriv+1,
+ theta,
+ theta*0 + 1,
+ theta*0)
+ }
}
- d2theta.deta2 <- function(theta, link, earg = list()) {
- string <- paste(link, "(theta = theta, earg = earg, deriv = 2)", sep = "")
- calls <- parse(text=string)[[1]]
- eval(calls)
+
+ nidentity <- function(theta,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+{
+ if (is.character(theta)) {
+ string <- paste("-", theta, sep = "")
+ if (tag)
+ string <- paste("Negative-identity:", string)
+ return(string)
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ -theta
+ }
+ } else {
+ switch(deriv+1,
+ -theta,
+ theta*0 - 1,
+ theta*0)
+ }
}
-.all.links = c("cloglog",
- "fisherz", "fsqrt", "identity", "inverse",
- "logc", "loge", "logit", "loglog",
- "logoff", "nreciprocal", "nloge",
- "powl", "probit", "reciprocal", "rhobit",
- "golf", "polf", "nbolf", "nbolf2")
+ logit <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("logit(", theta, ")", sep = "") else
+ paste("log(", theta, "/(1-", theta, "))", sep = "")
+ if (tag)
+ string <- paste("Logit:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue)) {
+ theta[theta <= 0.0] <- bvalue
+ theta[theta >= 1.0] <- 1.0 - bvalue
+ }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ exp(theta - log1p(exp(theta)))
+ }
+ } else {
+ switch(deriv+1, {
+ temp2 <- log(theta) - log1p(-theta)
+ if (any(near0.5 <- (abs(theta - 0.5) < 0.000125)))
+ temp2[near0.5] <- log(theta[near0.5] / (1 - theta[near0.5]))
+ temp2
+ },
+ exp(log(theta) + log1p(-theta)),
+ exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+ }
+}
+
+
+
+
- loglog <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+ loglog <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("loglog(", theta, ")", sep = "") else
- paste("log(log(", theta, "))", sep = "")
- if (tag)
- string <- paste("Log-Log:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval))
- theta[theta <= 1.0] <- earg$bval
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- exp(exp(theta))
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("loglog(", theta, ")", sep = "") else
+ paste("log(log(", theta, "))", sep = "")
+ if (tag)
+ string <- paste("Log-Log:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue))
+ theta[theta <= 1.0] <- bvalue
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- log(log(theta))},
- theta * log(theta),
- { junk <- log(theta)
- -junk^2 / (1 + junk)
- },
- stop("argument 'deriv' unmatched"))
+ exp(exp(theta))
}
+ } else {
+ switch(deriv+1, {
+ log(log(theta))},
+ theta * log(theta),
+ { junk <- log(theta)
+ -junk^2 / (1 + junk)
+ },
+ stop("argument 'deriv' unmatched"))
+ }
}
- cloglog <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+
+ cloglog <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("cloglog(", theta, ")", sep = "") else
- paste("log(-log(1-", theta, "))", sep = "")
- if (tag)
- string <- paste("Complementary log-log:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval)) {
- theta[theta <= 0.0] <- earg$bval
- theta[theta >= 1.0] <- 1.0 - earg$bval
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- junk <- exp(theta)
- -expm1(-junk)
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("cloglog(", theta, ")", sep = "") else
+ paste("log(-log(1-", theta, "))", sep = "")
+ if (tag)
+ string <- paste("Complementary log-log:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue)) {
+ theta[theta <= 0.0] <- bvalue
+ theta[theta >= 1.0] <- 1.0 - bvalue
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- log(-log1p(-theta)) },
- -(1 - theta) * log1p(-theta),
- { junk <- log1p(-theta)
- -(1 - theta) * (1 + junk) * junk
- },
- stop("argument 'deriv' unmatched"))
+ junk <- exp(theta)
+ -expm1(-junk)
}
+ } else {
+ switch(deriv+1, {
+ log(-log1p(-theta)) },
+ -(1 - theta) * log1p(-theta),
+ { junk <- log1p(-theta)
+ -(1 - theta) * (1 + junk) * junk
+ },
+ stop("argument 'deriv' unmatched"))
+ }
}
- probit <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+ probit <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("probit(", theta, ")", sep = "") else
- paste("qnorm(", theta, ")", sep = "")
- if (tag)
- string <- paste("Probit:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval)) {
- theta[theta <= 0.0] <- earg$bval
- theta[theta >= 1.0] <- 1-earg$bval
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("probit(", theta, ")", sep = "") else
+ paste("qnorm(", theta, ")", sep = "")
+ if (tag)
+ string <- paste("Probit:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue)) {
+ theta[theta <= 0.0] <- bvalue
+ theta[theta >= 1.0] <- 1 - bvalue
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ ans <- pnorm(theta)
+ if (is.matrix(theta))
+ dim(ans) <- dim(theta)
+ ans
}
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
+ } else {
+ switch(deriv+1, {
+ ans <- qnorm(theta)
+ if (is.matrix(theta))
+ dim(ans) <- dim(theta)
+ ans
+ },
+ {
+ if (is.matrix(theta)) {
+ ans <- dnorm(qnorm(theta))
+ dim(ans) <- dim(theta)
+ ans
+ } else dnorm(qnorm(as.vector(theta)))
+ },
+ {
+ junk <- qnorm(theta)
+ ans <- -junk * dnorm(junk)
+ if (is.vector(theta)) ans else
+ if (is.matrix(theta)) {
+ dim(ans) <- dim(theta)
+ ans
} else {
- ans <- pnorm(theta)
- if (is.matrix(theta))
- dim(ans) <- dim(theta)
+ warning("can only handle vectors and matrices;",
+ " converting to vector")
ans
}
- } else {
- switch(deriv+1, {
- ans <- qnorm(theta)
- if (is.matrix(theta))
- dim(ans) <- dim(theta)
- ans
- },
- {
- if (is.matrix(theta)) {
- ans <- dnorm(qnorm(theta))
- dim(ans) <- dim(theta)
- ans
- } else dnorm(qnorm(as.vector(theta)))
- },
- {
- junk <- qnorm(theta)
- ans <- -junk * dnorm(junk)
- if (is.vector(theta)) ans else
- if (is.matrix(theta)) {
- dim(ans) <- dim(theta)
- ans
- } else {
- warning("can only handle vectors and matrices;",
- " converting to vector")
- ans
- }
- })
- }
+ })
+ }
}
@@ -270,204 +376,151 @@ TypicalVGAMlinkFunction <- function(theta,
- explink <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+ explink <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("exp(", theta, ")", sep = "") else
- paste("exp(", theta, ")", sep = "")
- if (tag)
- string <- paste("Exp:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval))
- theta[theta <= 0.0] <- earg$bval
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- log(theta)
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("exp(", theta, ")", sep = "") else
+ paste("exp(", theta, ")", sep = "")
+ if (tag)
+ string <- paste("Exp:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue))
+ theta[theta <= 0.0] <- bvalue
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- exp(theta)},
- 1 / exp(theta),
- -1 / exp(theta * 2))
+ log(theta)
}
+ } else {
+ switch(deriv+1, {
+ exp(theta)},
+ 1 / exp(theta),
+ -1 / exp(theta * 2))
+ }
}
-
- loge <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+ reciprocal <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("log(", theta, ")", sep = "") else
- paste("log(", theta, ")", sep = "")
- if (tag)
- string <- paste("Log:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval))
- theta[theta <= 0.0] <- earg$bval
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- exp(theta)
- }
+ if (is.character(theta)) {
+ string <- paste("1/", theta, sep = "")
+ if (tag)
+ string <- paste("Reciprocal:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue))
+ theta[theta == 0.0] <- bvalue
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- log(theta)},
- theta,
- theta)
+ 1/theta
}
+ } else {
+ switch(deriv+1, {
+ 1/theta},
+ -theta^2,
+ 2*theta^3)
+ }
}
+ nloge <- function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("-log(", theta, ")", sep = "") else
+ paste("-log(", theta, ")", sep = "")
+ if (tag)
+ string <- paste("Negative log:", string)
+ return(string)
+ }
- identity <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
- if (is.character(theta)) {
- string <- theta
- if (tag)
- string <- paste("Identity:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- theta
- }
- } else {
- switch(deriv+1,
- theta,
- theta*0 + 1,
- theta*0)
- }
-}
-
- nidentity <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-{
- if (is.character(theta)) {
- string <- paste("-", theta, sep = "")
- if (tag)
- string <- paste("Negative-Identity:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- -theta
- }
+ if (!inverse && length(bvalue))
+ theta[theta <= 0.0] <- bvalue
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1,
- -theta,
- theta*0 - 1,
- theta*0)
+ exp(-theta)
}
+ } else {
+ switch(deriv+1, {
+ -log(theta)},
+ -theta,
+ theta)
+ }
}
- reciprocal <- function(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-{
- if (is.character(theta)) {
- string <- paste("1/", theta, sep = "")
- if (tag)
- string <- paste("Reciprocal:", string)
- return(string)
- }
- if (!inverse.arg && is.list(earg) && length(earg$bval))
- theta[theta == 0.0] <- earg$bval
- if (inverse.arg) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse.arg = FALSE, deriv = deriv)
- } else {
- 1/theta
- }
- } else {
- switch(deriv+1, {
- 1/theta},
- -theta^2,
- 2*theta^3)
- }
-}
- nloge <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
- if (is.character(theta)) {
- string <- if (short)
- paste("-log(", theta, ")", sep = "") else
- paste("-log(", theta, ")", sep = "")
- if (tag)
- string <- paste("Negative log:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval))
- theta[theta <= 0.0] <- earg$bval
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- exp(-theta)
- }
- } else {
- switch(deriv+1, {
- -log(theta)},
- -theta,
- theta)
- }
-}
+ nreciprocal <-
+ function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE,
+ deriv = 0, short = TRUE, tag = FALSE)
+{
+ if (is.character(theta)) {
+ string <- paste("-1/", theta, sep = "")
+ if (tag)
+ string <- paste("Negative reciprocal:", string)
+ return(string)
+ }
+ if (!inverse && length(bvalue))
+ theta[theta == 0.0] <- bvalue
- nreciprocal <- function(theta, earg = list(), inverse.arg = FALSE,
- deriv = 0, short = TRUE, tag = FALSE)
-{
- if (is.character(theta)) {
- string <- paste("-1/", theta, sep = "")
- if (tag)
- string <- paste("Negative reciprocal:", string)
- return(string)
- }
- if (!inverse.arg && is.list(earg) && length(earg$bval))
- theta[theta == 0.0] <- earg$bval
- if (inverse.arg) {
- if (deriv > 0) {
- 1 / nreciprocal(theta, earg = earg, inverse.arg = FALSE, deriv)
- } else {
- -1/theta
- }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- -1/theta},
- theta^2,
- 2*theta^3)
+ -1/theta
}
+ } else {
+ switch(deriv+1, {
+ -1/theta},
+ theta^2,
+ 2*theta^3)
+ }
}
- natural.ig <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+
+ natural.ig <-
+ function(theta,
+ bvalue = NULL, # .Machine$double.eps is an alternative
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
{
if (is.character(theta)) {
@@ -476,9 +529,12 @@ TypicalVGAMlinkFunction <- function(theta,
string <- paste("Negative inverse:", string)
return(string)
}
+
if (inverse) {
if (deriv > 0) {
- 1 / nreciprocal(theta, earg = earg, inverse.arg = FALSE, deriv)
+ 1 / nreciprocal(theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
1 / sqrt(-2*theta)
}
@@ -494,619 +550,721 @@ TypicalVGAMlinkFunction <- function(theta,
- rhobit <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+
+ rhobit <- function(theta,
+ bminvalue = NULL,
+ bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("rhobit(", theta, ")", sep = "") else
- paste("log((1+", theta, ")/(1-", theta, "))", sep = "")
- if (tag)
- string <- paste("Rhobit:", string)
- return(string)
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("rhobit(", theta, ")", sep = "") else
+ paste("log((1+", theta, ")/(1-", theta, "))", sep = "")
+ if (tag)
+ string <- paste("Rhobit:", string)
+ return(string)
+ }
- if (!inverse && is.list(earg) && length(earg)) {
- bminvalue = if (length(earg$bminval)) earg$bminval else NULL
- bmaxvalue = if (length(earg$bmaxval)) earg$bmaxval else NULL
- if (!inverse && length(bminvalue)) theta[theta <= -1.0] <- bminvalue
- if (!inverse && length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
- }
+ if (!inverse) {
+ if (length(bminvalue)) theta[theta <= -1.0] <- bminvalue
+ if (length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
+ }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- junk <- exp(theta)
- expm1(theta) / (junk+1.0)
- }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bminvalue = bminvalue,
+ bmaxvalue = bmaxvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- log1p(theta) - log1p(-theta)},
- (1 - theta^2) / 2,
- (1 - theta^2)^2 / (4*theta))
+ junk <- exp(theta)
+ expm1(theta) / (junk+1.0)
}
+ } else {
+ switch(deriv+1, {
+ log1p(theta) - log1p(-theta)},
+ (1 - theta^2) / 2,
+ (1 - theta^2)^2 / (4*theta))
+ }
}
- fisherz <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+
+ fisherz <- function(theta,
+ bminvalue = NULL,
+ bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("fisherz(", theta, ")", sep = "") else
- paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep = "")
- if (tag)
- string <- paste("Fisher's Z transformation:", string)
- return(string)
- }
-
- if (!inverse && is.list(earg) && length(earg)) {
- bminvalue = if (length(earg$bminval)) earg$bminval else NULL
- bmaxvalue = if (length(earg$bmaxval)) earg$bmaxval else NULL
- if (!inverse && length(bminvalue)) theta[theta <= -1.0] <- bminvalue
- if (!inverse && length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("fisherz(", theta, ")", sep = "") else
+ paste("(1/2)log((1+", theta, ")/(1-", theta, "))", sep = "")
+ if (tag)
+ string <- paste("Fisher's Z transformation:", string)
+ return(string)
+ }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- junk <- exp(2*theta)
- expm1(2*theta) / (junk+1.0)
- }
+ if (!inverse) {
+ if (length(bminvalue)) theta[theta <= -1.0] <- bminvalue
+ if (length(bmaxvalue)) theta[theta >= 1.0] <- bmaxvalue
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bminvalue = bminvalue,
+ bmaxvalue = bmaxvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1,
- 0.5 * log1p(theta) - log1p(-theta),
- 1.0 - theta^2,
- (1.0 - theta^2)^2 / (2*theta))
+ junk <- exp(2*theta)
+ expm1(2*theta) / (junk+1.0)
+ }
+ } else {
+ switch(deriv+1,
+ 0.5 * log1p(theta) - log1p(-theta),
+ 1.0 - theta^2,
+ (1.0 - theta^2)^2 / (2*theta))
}
}
-fsqrt <- function(theta, earg = list(min = 0, max = 1, mux=sqrt(2)),
- inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) {
- min = 0; max = 1; mux=sqrt(2)
- if (!is.list(earg)) stop("earg must be a list")
- if (is.Numeric(earg$min)) min = earg$min
- if (is.Numeric(earg$max)) max = earg$max
- if (is.Numeric(earg$mux)) mux = earg$mux
- if (!is.Numeric(min, allowable.length = 1))
- stop("bad input for 'min' component")
- if (!is.Numeric(max, allowable.length = 1))
- stop("bad input for 'max' component")
- if (!is.Numeric(mux, allowable.length = 1, positive = TRUE))
- stop("bad input for 'mux' component")
- if (min >= max)
- stop("'min' >= 'max' is not allowed")
- if (is.character(theta)) {
- string <- if (short)
- paste("fsqrt(", theta, ")", sep = "") else {
- if (abs(mux-sqrt(2)) < 1.0e-10)
- paste("sqrt(2*", theta, ") - sqrt(2*(1-", theta, "))", sep = "") else
- paste(as.character(mux),
- " * (sqrt(", theta, "-",min, ") - sqrt(",max, "-", theta, "))", sep = "")
- }
- if (tag)
- string <- paste("Folded Square Root:", string)
- return(string)
+
+
+ mlogit <-
+ function(theta,
+ refLevel = "last",
+ M = NULL, # stop("argument 'M' not specified"),
+ whitespace = FALSE,
+ bvalue = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+{
+
+
+ fillerChar <- ifelse(whitespace, " ", "")
+
+ if (length(refLevel) != 1)
+ stop("the length of 'refLevel' must be one")
+
+ if (is.character(refLevel)) {
+ if (refLevel != "last")
+ stop('if a character, refLevel must be "last"')
+ refLevel <- -1
+ } else
+ if (is.factor(refLevel)) {
+ if (is.ordered(refLevel))
+ warning("argument 'refLevel' is from an ordered factor")
+ refLevel <- as.character(refLevel) == levels(refLevel)
+ refLevel <- (1:length(refLevel))[refLevel]
+ if (!is.Numeric(refLevel, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("could not coerce 'refLevel' into a single positive integer")
+ } else
+ if (!is.Numeric(refLevel, allowable.length = 1,
+ integer.valued = TRUE, positive = TRUE))
+ stop("'refLevel' must be a single positive integer")
+
+
+
+
+ if (is.character(theta)) {
+ is.M <- is.finite(M) && is.numeric(M)
+ string <- if (short)
+ paste("mlogit(", theta, ")", sep = "") else {
+ if (refLevel < 0) {
+ ifelse(whitespace,
+ paste("log(", theta, "[,j] / ",
+ theta, "[,",
+ ifelse(is.M, M+1, "M+1"),
+ "]), j = 1:",
+ M, sep = ""),
+ paste("log(", theta, "[,j]/",
+ theta, "[,",
+ ifelse(is.M, M+1, "M+1"),
+ "]), j=1:",
+ ifelse(is.M, M, "M"), sep = ""))
+ } else {
+ if (refLevel == 1) {
+ paste("log(", theta, "[,", "j]",
+ fillerChar, "/", fillerChar,
+ "", theta, "[,", refLevel, "]), j",
+ fillerChar, "=", fillerChar, "2:",
+ ifelse(is.M, (M+1), "(M+1)"),
+ sep = "")
+ } else {
+ paste("log(", theta, "[,", "j]", fillerChar, "/",
+ "", theta, "[,", refLevel, "]), j",
+ fillerChar, "=", fillerChar,
+ "c(1:", refLevel-1, ",",
+ fillerChar,
+ refLevel+1, ":",
+ ifelse(is.M, (M+1), "(M+1)"),
+ ")", sep = "")
+ }
+ }
}
+ if (tag)
+ string <- paste("Multinomial logit link:", string)
+ return(string)
+ }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- mid = (min + max) / 2
- boundary = mux * sqrt(max - min)
- temp = pmax(0, (theta/mux)^2 * (2*(max-min) - (theta/mux)^2))
- ans = theta
- if (any(ind5 <- theta < 0))
- ans[ind5] = mid - 0.5 * sqrt(temp[ind5])
- if (any(ind5 <- theta >= 0))
- ans[ind5] = mid + 0.5 * sqrt(temp[ind5])
- ans[theta < -boundary] <- NA
- ans[theta > boundary] <- NA
- ans
- }
+
+
+ M <- if (inverse) ncol(cbind(theta)) else
+ ncol(cbind(theta)) - 1
+ if (M < 1)
+ ifelse(inverse,
+ stop("argument 'eta' should have at least one column"),
+ stop("argument 'theta' should have at least two columns"))
+
+
+
+ if (!inverse && length(bvalue))
+ theta[theta <= 0.0] <- bvalue
+ if (!inverse && length(bvalue))
+ theta[theta >= 1.0] <- 1 - bvalue
+
+
+
+ foo <- function(eta, refLevel = -1, M) {
+ phat <- if ((refLevel < 0) || (refLevel == M+1)) {
+ cbind(exp(eta), 1)
+ } else if ( refLevel == 1) {
+ cbind(1, exp(eta))
} else {
- switch(deriv+1,
- mux * (sqrt(theta-min) - sqrt(max-theta)),
- (2 / mux) / (1/sqrt(theta-min) + 1/sqrt(max-theta)),
- -(4 / mux) / ((theta-min)^(-3/2) - (max-theta)^(-3/2)))
+ use.refLevel <- if ( refLevel < 0) M+1 else refLevel
+ etamat <- cbind(eta[, 1:( refLevel - 1)], 0,
+ eta[, ( refLevel ):M])
+ exp(etamat)
+ }
+ ans <- phat / rowSums(phat)
+ colnames(ans) <- NULL
+ ans
+ }
+
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ refLevel = refLevel,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ ans <- if ( refLevel < 0) {
+ log(theta[, -ncol(theta)] / theta[, ncol(theta)])
+ } else {
+ use.refLevel <- if ( refLevel < 0) ncol(theta) else refLevel
+ log(theta[, -( use.refLevel )] / theta[, use.refLevel ])
+ }
+ colnames(ans) <- NULL
+ ans
}
+ } else {
+ switch(deriv + 1,
+ foo(theta, refLevel, M = M), # log(theta[, -jay] / theta[, jay])
+ exp(log(theta) + log1p(-theta)),
+ exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+ }
}
- powl <- function(theta, earg = list(power = 1), inverse = FALSE, deriv = 0,
+
+
+
+fsqrt <- function(theta, # = NA , = NULL,
+ min = 0, max = 1, mux = sqrt(2),
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
+ if (!is.Numeric(min, allowable.length = 1))
+ stop("bad input for 'min' component")
+ if (!is.Numeric(max, allowable.length = 1))
+ stop("bad input for 'max' component")
+ if (!is.Numeric(mux, allowable.length = 1, positive = TRUE))
+ stop("bad input for 'mux' component")
+ if (min >= max)
+ stop("'min' >= 'max' is not allowed")
- if (!length(earg) || is.list(earg)) {
- exponent = if (length(earg$power)) earg$power else 1
- if (exponent == 0)
- stop("use the 'loge' link")
- } else {
- stop("argument 'earg' must be a list or NULL")
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("fsqrt(", theta, ")", sep = "") else {
+ if (abs(mux-sqrt(2)) < 1.0e-10)
+ paste("sqrt(2*", theta, ") - sqrt(2*(1-", theta, "))",
+ sep = "") else
+ paste(as.character(mux),
+ " * (sqrt(", theta, "-", min, ") - sqrt(",
+ max, "-", theta, "))",
+ sep = "")
}
+ if (tag)
+ string <- paste("Folded square root:", string)
+ return(string)
+ }
- if (is.character(theta)) {
- string <- if (short)
- paste("powl(", theta, ", earg = list(power = ", as.character(exponent),
- "))", sep = "") else
- paste(theta, "^(", as.character(exponent), ")", sep = "")
- if (tag)
- string <- paste("Power:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- theta^(1/exponent)
- }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ min = min, max = max, mux = mux,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1,
- {
- theta^exponent
- },
- {
- (theta^(1-exponent)) / exponent
- },
- {
- (theta^(2-exponent)) / (exponent * (exponent-1))
- })
+ mid <- (min + max) / 2
+ boundary <- mux * sqrt(max - min)
+ temp <- pmax(0, (theta/mux)^2 * (2*(max-min) - (theta/mux)^2))
+ ans <- theta
+ if (any(ind5 <- theta < 0))
+ ans[ind5] <- mid - 0.5 * sqrt(temp[ind5])
+ if (any(ind5 <- theta >= 0))
+ ans[ind5] <- mid + 0.5 * sqrt(temp[ind5])
+ ans[theta < -boundary] <- NA
+ ans[theta > boundary] <- NA
+ ans
}
+ } else {
+ switch(deriv+1,
+ mux * (sqrt(theta-min) - sqrt(max-theta)),
+ (2 / mux) / (1/sqrt(theta-min) + 1/sqrt(max-theta)),
+ -(4 / mux) / ((theta-min)^(-3/2) - (max-theta)^(-3/2)))
+ }
}
- elogit <- function(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
- if (!length(earg) || is.list(earg)) {
- A = if (length(earg$min)) earg$min else 0
- B = if (length(earg$max)) earg$max else 1
- bminvalue = if (length(earg$bminval)) earg$bminval else NULL
- bmaxvalue = if (length(earg$bmaxval)) earg$bmaxval else NULL
- if (!inverse && length(bminvalue)) theta[theta <= A] <- bminvalue
- if (!inverse && length(bmaxvalue)) theta[theta >= B] <- bmaxvalue
- } else {
- stop("argument 'earg' must be a list or NULL")
- }
- if (is.character(theta)) {
- string <- if (short) {
- if (A != 0 || B != 1)
- paste("elogit(", theta, ", earg = list(min = ",A,
- ", max = ",B, "))",sep = "") else
- paste("elogit(", theta, ")",sep = "")
- } else
- paste("log((", theta, "-min)/(max-", theta, "))", sep = "")
- if (tag)
- string <- paste("Extended logit:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- junk <- if (is.R()) care.exp(theta) else care.exp(theta)
- (A + B*junk) / (1.0 + junk)
- }
- } else {
- switch(deriv+1, {
- log((theta-A)/(B-theta))},
- (theta-A) * (B - theta) / (B-A),
- (theta-A) * (B - theta) * (B - 2 * theta + A) / (B-A)^2)
- }
-}
+ powl <- function(theta,
+ power = 1,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ exponent <- power
+ if (exponent == 0)
+ stop("use the 'loge' link")
+
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("powl(", theta, ", power = ",
+ as.character(exponent), ")",
+ sep = "") else
+ paste(theta, "^(", as.character(exponent), ")", sep = "")
+ if (tag)
+ string <- paste("Power link:", string)
+ return(string)
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ power = power,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ theta^(1/exponent)
+ }
+ } else {
+ switch(deriv+1,
+ {
+ theta^exponent
+ },
+ {
+ (theta^(1-exponent)) / exponent
+ },
+ {
+ (theta^(2-exponent)) / (exponent * (exponent-1))
+ })
+ }
+}
+
+
+
+
+
+ elogit <- function(theta,
+ min = 0, max = 1,
+ bminvalue = NULL,
+ bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+
+ A = min
+ B = max
+ if (!inverse && length(bminvalue)) theta[theta <= A] <- bminvalue
+ if (!inverse && length(bmaxvalue)) theta[theta >= B] <- bmaxvalue
- logit <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
- if (is.character(theta)) {
- string <- if (short)
- paste("logit(", theta, ")", sep = "") else
- paste("log(", theta, "/(1-", theta, "))", sep = "")
- if (tag)
- string <- paste("Logit:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval)) {
- theta[theta <= 0.0] <- earg$bval;
- theta[theta >= 1.0] <- 1.0 - earg$bval;
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- eta <- care.exp(theta)
- eta / (1.0 + eta)
- }
+ if (is.character(theta)) {
+ string <- if (short) {
+ if (A != 0 || B != 1)
+ paste("elogit(", theta,
+ ", min = ", A,
+ ", max = ", B, ")", sep = "") else
+ paste("elogit(", theta, ")", sep = "")
} else {
- switch(deriv+1, {
- temp2 = log(theta) - log1p(-theta)
- if (any(near0.5 <- (abs(theta - 0.5) < 0.000125)))
- temp2[near0.5] = log(theta[near0.5] / (1-theta[near0.5]))
- temp2
- },
- exp(log(theta) + log1p(-theta)),
- exp(log(theta) + log1p(-theta)) * (1 - 2 * theta))
+ paste("log((", theta, "-min)/(max-", theta, "))", sep = "")
}
+ if (tag)
+ string <- paste("Extended logit:", string)
+ return(string)
+ }
+
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ min = min, max = max,
+ bminvalue = bminvalue,
+ bmaxvalue = bmaxvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ junk <- care.exp(theta)
+ (A + B * junk) / (1.0 + junk)
+ }
+ } else {
+ switch(deriv+1, {
+ log((theta - A)/(B - theta))},
+ (theta - A) * (B - theta) / (B-A),
+ (theta - A) * (B - theta) * (B - 2 * theta + A) / (B-A)^2)
+ }
}
- logc <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+
+
+
+
+ logc <- function(theta,
+ bvalue = NULL, # .Machine$double.xmin is an alternative
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE) {
- if (is.character(theta)) {
- string <- if (short)
- paste("logc(", theta, ")", sep = "") else
- paste("log(1-", theta, ")", sep = "")
- if (tag)
- string <- paste("Log Complementary:", string)
- return(string)
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("logc(", theta, ")", sep = "") else
+ paste("log(1-", theta, ")", sep = "")
+ if (tag)
+ string <- paste("Log Complementary:", string)
+ return(string)
+ }
- if (!inverse && is.list(earg) && length(earg$bval)) {
- theta[theta >= 1.0] <- earg$bval;
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- -expm1(theta)
- }
+ if (!inverse && length(bvalue)) {
+ theta[theta >= 1.0] <- bvalue;
+ }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
} else {
- switch(deriv+1, {
- log1p(-theta)},
+ -expm1(theta)
+ }
+ } else {
+ switch(deriv+1, {
+ log1p(-theta)},
-(1.0 - theta),
-(1.0 - theta)^2)
- }
+ }
}
- logoff <- function(theta, earg = list(offset = 0), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE) {
- if (!length(earg) || is.list(earg)) {
- offset = if (length(earg$offset)) earg$offset else 0
- } else {
- stop("argument 'earg' must be a list or NULL")
- }
-
- if (!is.Numeric(offset))
- stop("bad input for argument 'earg'")
-
- if (is.character(theta)) {
- string <- if (short)
- paste("logoff(", theta,
- ", list(offset = ",as.character(offset), "))", sep = "") else
- paste("log(", as.character(offset), "+", theta, ")", sep = "")
- if (tag)
- string <- paste("Log with offset:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- exp(theta) - offset
- }
- } else {
- switch(deriv+1,
- log(theta+offset),
- theta + offset,
- theta + offset)
- }
-}
-if(FALSE)
-nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-{
- offset = earg
- if (!is.Numeric(offset))
- stop("bad input for argument 'earg'")
- if (is.character(theta)) {
- string <- if (short)
- paste("nlogoff(", theta, ", ",as.character(offset), ")", sep = "") else
- paste("log(", as.character(offset), "-", theta, ")", sep = "")
- if (tag)
- string <- paste("Negative-log with offset:", string)
- return(string)
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- offset - exp(theta)
- }
- } else {
- switch(deriv+1,
- log(-theta+offset),
- theta - offset,
- theta - offset)
- }
-}
- cauchit <- function(theta, earg = list(bvalue= .Machine$double.eps),
+ cauchit <- function(theta,
+ bvalue = .Machine$double.eps,
inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- if (is.character(theta)) {
- string <- if (short)
- paste("cauchit(", theta, ")", sep = "") else
- paste("tan(pi*(", theta, "-0.5))", sep = "")
- if (tag)
- string <- paste("Cauchit:", string)
- return(string)
- }
- if (!inverse && is.list(earg) && length(earg$bval)) {
- theta[theta <= 0.0] <- earg$bval
- theta[theta >= 1.0] <- 1.0 - earg$bval
- }
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- 0.5 + atan(theta)/pi
- }
- } else {
- switch(deriv+1, {
- tan(pi * (theta-0.5))},
- cos(pi * (theta-0.5))^2 / pi,
- -sin(2 * pi * (theta-0.5)))
- }
+ if (is.character(theta)) {
+ string <- if (short)
+ paste("cauchit(", theta, ")", sep = "") else
+ paste("tan(pi*(", theta, "-0.5))", sep = "")
+ if (tag)
+ string <- paste("Cauchit:", string)
+ return(string)
+ }
+
+ if (!inverse && length(bvalue)) {
+ theta[theta <= 0.0] <- bvalue
+ theta[theta >= 1.0] <- 1.0 - bvalue
+ }
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ bvalue = bvalue,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ 0.5 + atan(theta) / pi
+ }
+ } else {
+ switch(deriv+1, {
+ tan(pi * (theta-0.5))},
+ cos(pi * (theta-0.5))^2 / pi,
+ -sin(pi * (theta-0.5) * 2))
+ }
}
- golf <- function(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
+
+
+ golf <- function(theta,
+ lambda = 1,
+ cutpoint = NULL,
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
- cutpoint = lambda = NULL
- if (!length(earg)) {
- lambda = 1
- cutpoint = NULL
- } else if (is.list(earg)) {
- lambda = earg$lambda
- cutpoint = earg$cutpoint # Optional; if so then is a NULL
- } else
- stop("argument 'earg' must be a list")
- if (!is.Numeric(lambda, positive = TRUE))
- stop('could not determine lambda or lambda has negative values')
- if (is.Numeric(cutpoint))
- if (any(cutpoint < 0) ||
- !is.Numeric(cutpoint, integer.valued = TRUE))
- warning("argument 'cutpoint' should contain ",
- "non-negative integer values")
- if (is.character(theta)) {
- string <- if (short) {
- lenl = length(lambda) > 1
- lenc = length(cutpoint) > 1
- paste("golf(", theta, ", earg = list(lambda = ",
- if (lenl) "c(" else "",
- ToString(lambda),
- if (lenl) ")" else "",
- if (is.Numeric(cutpoint))
- paste(", cutpoint = ",
- if (lenc) "c(" else "",
- ToString(cutpoint),
- if (lenc) ")" else "",
- sep = "") else "",
- "))", sep = "") } else {
- if (is.Numeric(cutpoint)) {
- paste("-3*log(1-qnorm(", theta, ")/(3*sqrt(lambda)))",
- " + log(cutpoint)", sep = "")
- } else {
- paste("-3*log(1-qnorm(", theta, ")/(3*sqrt(lambda)))", sep = "")
- }
- }
- if (tag)
- string <- paste("Gamma-ordinal link function:", string)
- return(string)
- }
+ if (!is.Numeric(lambda, positive = TRUE))
+ stop('could not determine lambda or lambda has negative values')
+ if (is.Numeric(cutpoint))
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
+ warning("argument 'cutpoint' should contain ",
+ "non-negative integer values")
- thmat = cbind(theta)
- lambda = rep(lambda, len=ncol(thmat)) # Allow recycling for lambda
- if (is.Numeric(cutpoint))
- cutpoint = rep(cutpoint, len=ncol(thmat))
- if (ncol(thmat) > 1) {
- answer = thmat
- for(ii in 1:ncol(thmat))
- answer[,ii] = Recall(theta = thmat[,ii],
- earg = list(lambda=lambda[ii],
- cutpoint =
- if (is.Numeric(cutpoint)) cutpoint[ii] else NULL),
- inverse = inverse, deriv = deriv)
- return(answer)
+ if (is.character(theta)) {
+ string <- if (short) {
+ lenl <- length(lambda) > 1
+ lenc <- length(cutpoint) > 1
+ paste("golf(", theta,
+ ", lambda = ",
+ if (lenl) "c(" else "",
+ ToString(lambda),
+ if (lenl) ")" else "",
+ if (is.Numeric(cutpoint))
+ paste(", cutpoint = ",
+ if (lenc) "c(" else "",
+ ToString(cutpoint),
+ if (lenc) ")" else "",
+ sep = "") else "",
+ ")", sep = "")
+ } else {
+ if (is.Numeric(cutpoint)) {
+ paste("-3*log(1-qnorm(", theta,
+ ")/(3*sqrt(lambda)))",
+ " + log(cutpoint)", sep = "")
+ } else {
+ paste("-3*log(1-qnorm(", theta,
+ ")/(3*sqrt(lambda)))", sep = "")
+ }
}
+ if (tag)
+ string <- paste("Gamma-ordinal link function:", string)
+ return(string)
+ }
- answer =
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- if (is.Numeric(cutpoint)) {
- pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
- } else {
- pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
- }
- }
+
+ thmat <- cbind(theta)
+ lambda <- rep(lambda, len = ncol(thmat)) # Allow recycling for lambda
+ if (is.Numeric(cutpoint))
+ cutpoint <- rep(cutpoint, len = ncol(thmat))
+ if (ncol(thmat) > 1) {
+ answer <- thmat
+ for(ii in 1:ncol(thmat))
+ answer[,ii] <- Recall(theta = thmat[,ii],
+ lambda = lambda[ii],
+ cutpoint = if (is.Numeric(cutpoint))
+ cutpoint[ii] else NULL,
+ inverse = inverse, deriv = deriv)
+ return(answer)
+ }
+
+
+ answer <- if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ lambda = lambda,
+ cutpoint = cutpoint,
+ inverse = FALSE, deriv = deriv)
} else {
- smallno = 1 * .Machine$double.eps
- Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
- Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
- Ql = qnorm(Theta)
- switch(deriv+1, {
- temp = Ql / (3*sqrt(lambda))
- temp = pmin(temp, 1.0 - smallno) # 100 / .Machine$double.eps
- -3*log1p(-temp) +
- if (is.Numeric(cutpoint)) log(cutpoint) else 0},
- (1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql),
- { stop('cannot handle deriv = 2') },
- stop("argument 'deriv' unmatched"))
+ if (is.Numeric(cutpoint)) {
+ pnorm((1-care.exp(-(theta-log(cutpoint))/3)) * 3 * sqrt(lambda))
+ } else {
+ pnorm((1-care.exp(-theta/3)) * 3 * sqrt(lambda))
+ }
}
- if (!is.Numeric(answer)) stop("the answer contains some NAs")
- answer
+ } else {
+ smallno <- 1 * .Machine$double.eps
+ Theta <- theta
+ Theta <- pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
+ Theta <- pmax(Theta, smallno) # Since theta == 0 is a possibility
+ Ql <- qnorm(Theta)
+ switch(deriv+1, {
+ temp <- Ql / (3*sqrt(lambda))
+ temp <- pmin(temp, 1.0 - smallno) # 100 / .Machine$double.eps
+ -3*log1p(-temp) +
+ if (is.Numeric(cutpoint)) log(cutpoint) else 0},
+ (1 - Ql / (3*sqrt(lambda))) * sqrt(lambda) * dnorm(Ql),
+ { stop('cannot handle deriv = 2') },
+ stop("argument 'deriv' unmatched"))
+ }
+ if (!is.Numeric(answer))
+ stop("the answer contains some NAs")
+ answer
}
- polf <- function(theta, earg = stop("argument 'earg' must be given"),
- inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
-{
- cutpoint = NULL
- if (is.Numeric(earg)) cutpoint = earg
- if (is.list(earg)) cutpoint = earg$cutpoint
- if (!is.Numeric(cutpoint))
- stop("could not determine the cutpoint")
- if (any(cutpoint < 0) ||
- !is.Numeric(cutpoint, integer.valued = TRUE))
- warning("argument 'cutpoint' should",
- " contain non-negative integer values")
- if (is.character(theta)) {
- string <- if (short) {
- lenc = length(cutpoint) > 1
- paste("polf(", theta, ", earg = list(cutpoint = ",
- if (lenc) "c(" else "",
- ToString(cutpoint),
- if (lenc) ")" else "",
- "))", sep = "")
- } else
- paste("2*log(0.5*qnorm(", theta, ") + sqrt(cutpoint+7/8))", sep = "")
- if (tag)
- string <- paste("Poisson-ordinal link function:", string)
- return(string)
- }
+
+ polf <- function(theta, # = 1,
+ cutpoint = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+ if (!is.Numeric(cutpoint))
+ stop("could not determine the cutpoint")
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
+ warning("argument 'cutpoint' should",
+ " contain non-negative integer values")
+
+
+ if (is.character(theta)) {
+ string <- if (short) {
+ lenc = length(cutpoint) > 1
+ paste("polf(", theta,
+ ", cutpoint = ",
+ if (lenc) "c(" else "",
+ ToString(cutpoint),
+ if (lenc) ")" else "",
+ ")", sep = "")
+ } else
+ paste("2*log(0.5*qnorm(", theta,
+ ") + sqrt(cutpoint+7/8))", sep = "")
+ if (tag)
+ string <- paste("Poisson-ordinal link function:", string)
+ return(string)
+ }
+
thmat = cbind(theta)
if (ncol(thmat) > 1) {
answer = thmat
- cutpoint = rep(cutpoint, len=ncol(thmat)) # Reqd for the for loop
+ cutpoint = rep(cutpoint, len = ncol(thmat)) # Reqd for the for loop
for(ii in 1:ncol(thmat))
- answer[,ii] = Recall(theta = thmat[,ii], earg=cutpoint[ii],
+ answer[,ii] = Recall(theta = thmat[,ii],
+ cutpoint = cutpoint,
inverse = inverse, deriv = deriv)
return(answer)
}
- answer =
- if (inverse) {
- if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
- inverse = FALSE, deriv = deriv)
- } else {
- if (cutpoint == 0) {
- cloglog(theta = theta, earg = earg,
- inverse = inverse, deriv = deriv)
- } else {
- pnorm(2 * exp(theta/2) - 2 * sqrt(cutpoint + 7/8))
- }
- }
+ answer =
+ if (inverse) {
+ if (deriv > 0) {
+ 1 / Recall(theta = theta,
+ cutpoint = cutpoint,
+ inverse = FALSE, deriv = deriv)
+ } else {
+ if (any(cp.index <- cutpoint == 0)) {
+ tmp <- theta
+ tmp[cp.index] <-
+ cloglog(theta = theta[cp.index],
+ inverse = inverse, deriv = deriv)
+ tmp[!cp.index] <-
+ pnorm(2 * exp(theta[!cp.index]/2) -
+ 2 * sqrt(cutpoint[!cp.index] + 7/8))
+ tmp
+ } else {
+ pnorm(2 * exp(theta/2) - 2 * sqrt(cutpoint + 7/8))
+ }
+ }
+ } else {
+ if (any(cp.index <- cutpoint == 0)) {
+ cloglog(theta = theta,
+ inverse = inverse, deriv = deriv)
} else {
- if (cutpoint == 0) {
- cloglog(theta = theta, earg = earg,
- inverse = inverse, deriv = deriv)
- } else {
- smallno = 1 * .Machine$double.eps
- SMALLNO = 1 * .Machine$double.xmin
- Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
- Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
- Ql = qnorm(Theta)
- switch(deriv+1, {
- temp = 0.5 * Ql + sqrt(cutpoint + 7/8)
- temp = pmax(temp, SMALLNO)
- 2 * log(temp)},
- (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql),
- { stop('cannot handle deriv = 2') },
- stop("argument 'deriv' unmatched"))
- }
+ smallno = 1 * .Machine$double.eps
+ SMALLNO = 1 * .Machine$double.xmin
+ Theta = theta
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
+ Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
+ Ql = qnorm(Theta)
+ switch(deriv+1, {
+ temp = 0.5 * Ql + sqrt(cutpoint + 7/8)
+ temp = pmax(temp, SMALLNO)
+ 2 * log(temp)},
+ (Ql/2 + sqrt(cutpoint + 7/8)) * dnorm(Ql),
+ { stop('cannot handle deriv = 2') },
+ stop("argument 'deriv' unmatched"))
}
- if (!is.Numeric(answer)) stop("the answer contains some NAs")
- answer
+ }
+ if (!is.Numeric(answer))
+ stop("the answer contains some NAs")
+ answer
}
- nbolf <- function(theta, earg = stop("argument 'earg' must be given"),
- inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) {
- cutpoint = kay = NULL
- if (is.list(earg)) {
- cutpoint = earg$cutpoint
- kay = earg$k
- }
- if (!is.Numeric(kay, positive = TRUE))
- stop("could not determine 'k' or it is not positive-valued")
- if (!is.Numeric(cutpoint))
- stop("could not determine the cutpoint")
- if (any(cutpoint < 0) ||
- !is.Numeric(cutpoint, integer.valued = TRUE))
- warning("argument 'cutpoint' should",
- " contain non-negative integer values")
- if (is.character(theta)) {
- string <- if (short) {
- lenc = length(cutpoint) > 1
- lenk = length(kay) > 1
- paste("nbolf(", theta, ", earg = list(cutpoint = ",
- if (lenc) "c(" else "",
- ToString(cutpoint),
- if (lenc) ")" else "",
- ", k = ",
- if (lenk) "c(" else "",
- ToString(kay),
- if (lenk) ")" else "",
- "))", sep = "")
- } else
- paste("2*log(sqrt(k) * sinh(qnorm(", theta, ")/(2*sqrt(k)) + ",
- "asinh(sqrt(cutpoint/k))))", sep = "")
- if (tag)
- string <- paste("Negative binomial-ordinal link function:", string)
- return(string)
- }
+
+ nbolf <- function(theta,
+ cutpoint = NULL,
+ k = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+
+ kay = k
+ if (!is.Numeric(kay, positive = TRUE))
+ stop("could not determine 'k' or it is not positive-valued")
+ if (!is.Numeric(cutpoint))
+ stop("could not determine the cutpoint")
+ if (any(cutpoint < 0) ||
+ !is.Numeric(cutpoint, integer.valued = TRUE))
+ warning("argument 'cutpoint' should",
+ " contain non-negative integer values")
+
+ if (is.character(theta)) {
+ string <- if (short) {
+ lenc = length(cutpoint) > 1
+ lenk = length(kay) > 1
+ paste("nbolf(", theta,
+ ", cutpoint = ",
+ if (lenc) "c(" else "",
+ ToString(cutpoint),
+ if (lenc) ")" else "",
+ ", k = ",
+ if (lenk) "c(" else "",
+ ToString(kay),
+ if (lenk) ")" else "",
+ ")", sep = "")
+ } else
+ paste("2*log(sqrt(k) * sinh(qnorm(", theta,
+ ")/(2*sqrt(k)) + ",
+ "asinh(sqrt(cutpoint/k))))", sep = "")
+ if (tag)
+ string <- paste("Negative binomial-ordinal link function:",
+ string)
+ return(string)
+ }
+
thmat = cbind(theta)
- kay = rep(kay, len=ncol(thmat)) # Allow recycling for kay
- cutpoint = rep(cutpoint, len=ncol(thmat)) # Allow recycling for cutpoint
+ kay = rep(kay, len = ncol(thmat)) # Allow recycling for kay
+ cutpoint = rep(cutpoint, len = ncol(thmat)) # Allow recycling for cutpoint
if (ncol(thmat) > 1) {
- answer = thmat
- for(ii in 1:ncol(thmat))
- answer[,ii] = Recall(theta = thmat[,ii],
- earg = list(cutpoint = cutpoint[ii], k = kay[ii]),
- inverse=inverse, deriv = deriv)
- return(answer)
+ answer = thmat
+ for(ii in 1:ncol(thmat))
+ answer[,ii] = Recall(theta = thmat[,ii],
+ cutpoint = cutpoint[ii],
+ k = kay[ii],
+ inverse = inverse, deriv = deriv)
+ return(answer)
}
answer =
if (inverse) {
if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
+ 1 / Recall(theta = theta,
+ cutpoint = cutpoint,
+ k = kay,
inverse = FALSE, deriv = deriv)
} else {
if (cutpoint == 0) {
@@ -1120,7 +1278,7 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
smallno = 1 * .Machine$double.eps
SMALLNO = 1 * .Machine$double.xmin
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
if (cutpoint == 0) {
switch(deriv+1, {
@@ -1151,28 +1309,30 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
- nbolf2 <- function(theta, earg = stop("argument 'earg' must be given"),
- inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE) {
- cutpoint = kay = NULL
- if (is.list(earg)) {
- cutpoint = earg$cutpoint
- kay = earg$k
- }
+ nbolf2 <- function(theta,
+ cutpoint = NULL,
+ k = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE) {
+
+ kay = k
if (!is.Numeric(kay, positive = TRUE))
- stop("could not determine argument 'k' or it is not positive-valued")
+ stop("could not determine argument 'k' or ",
+ "it is not positive-valued")
if (!is.Numeric(cutpoint))
- stop("could not determine the cutpoint")
+ stop("could not determine the cutpoint")
if (any(cutpoint < 0) ||
!is.Numeric(cutpoint, integer.valued = TRUE))
- warning("argument 'cutpoint' should",
- " contain non-negative integer values")
+ warning("argument 'cutpoint' should ",
+ "contain non-negative integer values")
if (is.character(theta)) {
string <- if (short) {
lenc = length(cutpoint) > 1
lenk = length(kay) > 1
- paste("nbolf2(", theta, ", earg = list(cutpoint = ",
+ paste("nbolf2(", theta,
+ ", earg = list(cutpoint = ",
if (lenc) "c(" else "",
ToString(cutpoint),
if (lenc) ")" else "",
@@ -1181,28 +1341,34 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
ToString(kay),
if (lenk) ")" else "",
"))", sep = "")
- } else
- paste("3*log(<a complicated expression>)", sep = "")
- if (tag)
- string = paste("Negative binomial-ordinal link function 2:", string)
- return(string)
+ } else {
+ paste("3*log(<a complicated expression>)", sep = "")
}
+ if (tag)
+ string = paste("Negative binomial-ordinal link function 2:",
+ string)
+ return(string)
+ }
+
thmat = cbind(theta)
- kay = rep(kay, len=ncol(thmat)) # Allow recycling for kay
+ kay = rep(kay, len = ncol(thmat)) # Allow recycling for kay
if (ncol(thmat) > 1) {
answer = thmat
for(ii in 1:ncol(thmat))
answer[,ii] = Recall(theta = thmat[,ii],
- earg = list(cutpoint = cutpoint[ii], k = kay[ii]),
- inverse=inverse, deriv = deriv)
+ cutpoint = cutpoint[ii],
+ k = kay[ii],
+ inverse = inverse, deriv = deriv)
return(answer)
}
answer =
if (inverse) {
if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
+ 1 / Recall(theta = theta,
+ cutpoint = cutpoint,
+ k = kay,
inverse = FALSE, deriv = deriv)
} else {
if (cutpoint == 0) {
@@ -1215,14 +1381,16 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
a4 = 9 / (cutpoint+1)
B = exp(theta/3)
mymat = rbind(a1^2*a2^2 + 2*a1*a2^3*B + B^2*a2^4, 0,
- -2*a1*a2*a3*B - 2*a2^2*a3*B^2 - a1^2*a3 - a2^2*a4, 0,
- B^2 * a3^2 + a3 * a4)
+ -2*a1*a2*a3*B - 2*a2^2*a3*B^2 - a1^2*a3 - a2^2*a4, 0,
+ B^2 * a3^2 + a3 * a4)
ans = Re(t(apply(mymat, 2, polyroot)))
theta2 = invfun = pnorm(-ans) # pnorm(-x) = 1-pnorm(x)
for(ii in 1:4) {
- theta2[,ii] = Recall(theta = theta2[,ii],
- earg = list(cutpoint = cutpoint, k = kay),
- inverse = FALSE, deriv = deriv)
+ theta2[,ii] =
+ Recall(theta = theta2[,ii],
+ cutpoint = cutpoint,
+ k = kay,
+ inverse = FALSE, deriv = deriv)
}
rankmat = t(apply(abs(theta2 - theta), 1, rank))
for(ii in 2:4) {
@@ -1237,7 +1405,7 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
smallno = 1 * .Machine$double.eps
SMALLNO = 1 * .Machine$double.xmin
Theta = theta
- Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
+ Theta = pmin(Theta, 1 - smallno) # Since theta == 1 is a possibility
Theta = pmax(Theta, smallno) # Since theta == 0 is a possibility
if (cutpoint == 0) {
switch(deriv+1, {
@@ -1245,7 +1413,7 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
temp = pmax(temp, SMALLNO)
log(kay) + log(temp)},
(kay / (1 - Theta)^(1/kay) - kay) * (1 - Theta)^(kay+1/kay),
- { stop('cannot handle deriv = 2') },
+ { stop("cannot handle 'deriv = 2'") },
stop("argument 'deriv' unmatched"))
} else {
Ql = qnorm(Theta)
@@ -1262,7 +1430,8 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
temp = ifelse(argmax1 > 0, argmax1, argmax2)
temp = pmax(temp, SMALLNO)
3 * log(temp)}, {
- BB = (sqrt(discrim) - Ql^2 * a3 * a4 / sqrt(discrim)) / dnorm(Ql)
+ BB = (sqrt(discrim) - Ql^2 * a3 *
+ a4 / sqrt(discrim)) / dnorm(Ql)
CC = 2 * Ql * a3 / dnorm(Ql)
dA.dtheta = (-denomin * BB - numerat * CC) / denomin^2
argmax1 / (3 * dA.dtheta)
@@ -1277,40 +1446,41 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
- Cut = function(y, breaks = c(-Inf, quantile(c(y), prob = (1:4)/4))) {
- y = as.matrix(y)
+ Cut <- function(y, breaks = c(-Inf, quantile(c(y), prob = (1:4)/4))) {
+ y <- as.matrix(y)
- temp = cut(y, breaks=breaks, labels = FALSE)
- temp = c(temp) # integer vector of integers
- if (any(is.na(temp)))
- stop("there are NAs")
- answer = if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
- if (ncol(y) > 1) {
- ynames = dimnames(y)[[2]]
- if (!length(ynames)) ynames = paste("Y", 1:ncol(y), sep = "")
- xnames = dimnames(y)[[1]]
- if (!length(xnames)) xnames = as.character(1:nrow(y))
- dimnames(answer) = list(xnames, ynames)
- }
- attr(answer, "breaks") = breaks
- answer
+ temp <- cut(y, breaks = breaks, labels = FALSE)
+ temp <- c(temp) # integer vector of integers
+ if (any(is.na(temp)))
+ stop("there are NAs")
+ answer <- if (ncol(y) > 1) matrix(temp, nrow(y), ncol(y)) else temp
+ if (ncol(y) > 1) {
+ ynames <- dimnames(y)[[2]]
+ if (!length(ynames))
+ ynames <- paste("Y", 1:ncol(y), sep = "")
+ xnames <- dimnames(y)[[1]]
+ if (!length(xnames)) xnames = as.character(1:nrow(y))
+ dimnames(answer) <- list(xnames, ynames)
+ }
+ attr(answer, "breaks") <- breaks
+ answer
}
- checkCut = function(y) {
- if (!is.Numeric(y, positive = TRUE, integer.valued = TRUE))
- stop("argument 'y' must contain positive integers only")
- uy = unique(y)
- L = max(uy)
- oklevels = 1:L
- if (L == 1)
- stop("only one unique value")
- for(ii in oklevels) {
- if (all(ii != uy))
- stop("there is no ", ii, " value")
- }
- TRUE
+ checkCut <- function(y) {
+ if (!is.Numeric(y, positive = TRUE, integer.valued = TRUE))
+ stop("argument 'y' must contain positive integers only")
+ uy <- unique(y)
+ L <- max(uy)
+ oklevels <- 1:L
+ if (L == 1)
+ stop("only one unique value")
+ for(ii in oklevels) {
+ if (all(ii != uy))
+ stop("there is no ", ii, " value")
+ }
+ TRUE
}
@@ -1321,7 +1491,11 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
- nbcanlink <- function(theta, earg = list(), inverse = FALSE, deriv = 0,
+ nbcanlink <- function(theta,
+ size = NULL,
+ wrt.eta = NULL,
+ bvalue = NULL,
+ inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
{
if (is.character(theta)) {
@@ -1334,56 +1508,58 @@ nlogoff <- function(theta, earg = 0, inverse = FALSE, deriv = 0,
}
- if (!length(earg))
- stop("argument 'earg' should have the eta matrix")
- kmatrix = earg$size
- if (!length(kmatrix))
- stop("argument 'earg' should have a 'size' component")
- theta = cbind(theta)
- kmatrix = cbind(kmatrix)
+
+ kmatrix <- size
+ theta <- cbind(theta)
+ kmatrix <- cbind(kmatrix)
if (ncol(kmatrix) != ncol(theta))
- stop("arguments 'theta' and 'earg$size' do not have ",
+ stop("arguments 'theta' and 'size' do not have ",
"an equal number of cols")
if (nrow(kmatrix) != nrow(theta))
- stop("arguments 'theta' and 'earg$size' do not have ",
+ stop("arguments 'theta' and 'size' do not have ",
"an equal number of rows")
if (deriv > 0) {
- wrt.eta = earg$wrt.eta
- if (!length(wrt.eta))
- stop("argument 'earg' should have a 'wrt.eta' component")
if (!(wrt.eta %in% 1:2))
- stop("argument 'earg' should be 1 or 2")
+ stop("argument 'wrt.eta' should be 1 or 2")
}
- if (!inverse && is.list(earg) && length(earg$bval))
- theta[theta <= 0.0] <- earg$bval
-
+ if (!inverse && length(bvalue))
+ theta[theta <= 0.0] <- bvalue
if (inverse) {
if (deriv > 0) {
- 1 / Recall(theta = theta, earg = earg,
+ 1 / Recall(theta = theta,
+ size = size,
+ wrt.eta = wrt.eta,
+ bvalue = bvalue,
inverse = FALSE, deriv = deriv)
} else {
- ans = (kmatrix / expm1(-theta))
- if (is.matrix(ans)) dimnames(ans) = NULL else names(ans) = NULL
+ ans <- (kmatrix / expm1(-theta))
+ if (is.matrix(ans))
+ dimnames(ans) <- NULL else
+ names(ans) <- NULL
ans
}
} else {
- ans =
+ ans <-
switch(deriv+1,
(log(theta / (theta + kmatrix))),
if (wrt.eta == 1) theta * (theta + kmatrix) / kmatrix else
-(theta + kmatrix),
- if (wrt.eta == 1)
- -(theta * (theta + kmatrix))^2 / ((2 * theta + kmatrix) * kmatrix) else
+ if (wrt.eta == 1)
+ -(theta * (theta + kmatrix))^2 / ((2 * theta + kmatrix) *
+ kmatrix) else
(theta + kmatrix)^2)
- if (is.matrix(ans)) dimnames(ans) = NULL else names(ans) = NULL
+ if (is.matrix(ans))
+ dimnames(ans) <- NULL else
+ names(ans) <- NULL
ans
}
}
+
diff --git a/R/logLik.vlm.q b/R/logLik.vlm.q
index aee904e..93fdda9 100644
--- a/R/logLik.vlm.q
+++ b/R/logLik.vlm.q
@@ -35,16 +35,16 @@ setMethod("logLik", "vgam", function(object, ...)
constraints.vlm <- function(object,
- type = c("vlm", "lm"),
+ type = c("lm", "term"),
all = TRUE, which, ...) {
- type <- match.arg(type, c("vlm","lm"))[1]
+ type <- match.arg(type, c("lm", "term"))[1]
- Hlist <-
- ans <- slot(object, "constraints") # For "vlm"
- if (type == "lm") {
+ Hlist <- ans <- slot(object, "constraints") # For "lm" (formerly "vlm")
+
+ if (type == "term") {
oassign.LM <- object at misc$orig.assign
x.LM <- model.matrix(object)
@@ -58,12 +58,13 @@ constraints.vlm <- function(object,
ans[[ii]] <- (Hlist[[col.ptr]])
}
names(ans) <- names.att.x.LM
- } # End of "lm"
+ } # End of "term"
if (all) ans else ans[[which]]
}
+
if (!isGeneric("constraints"))
setGeneric("constraints", function(object, ...)
standardGeneric("constraints"))
@@ -77,6 +78,3 @@ setMethod("constraints", "vlm", function(object, ...)
-
-
-
diff --git a/R/model.matrix.vglm.q b/R/model.matrix.vglm.q
index 2cc75f4..d2292c4 100644
--- a/R/model.matrix.vglm.q
+++ b/R/model.matrix.vglm.q
@@ -214,7 +214,10 @@
x = slot(object, "x")
- Xm2 = slot(object, "Xm2")
+
+
+ Xm2 = if (any(slotNames(object) == "Xm2")) slot(object, "Xm2") else
+ numeric(0)
if (!length(x)) {
data = model.frame(object, xlev = object at xlevels, ...)
@@ -255,7 +258,7 @@
M = object at misc$M
- Blist = object at constraints # == constraints(object, type = "vlm")
+ Blist = object at constraints # == constraints(object, type = "lm")
X_vlm <- lm2vlm.model.matrix(x = x, Blist = Blist,
xij = object at control$xij, Xm2 = Xm2)
@@ -415,7 +418,7 @@ setMethod("depvar", "qrrvglm", function(object, ...)
depvar.vlm(object, ...))
setMethod("depvar", "cao", function(object, ...)
depvar.vlm(object, ...))
-setMethod("depvar", "rcam", function(object, ...)
+setMethod("depvar", "rcim", function(object, ...)
depvar.vlm(object, ...))
@@ -443,7 +446,7 @@ setMethod("npred", "qrrvglm", function(object, ...)
npred.vlm(object, ...))
setMethod("npred", "cao", function(object, ...)
npred.vlm(object, ...))
-setMethod("npred", "rcam", function(object, ...)
+setMethod("npred", "rcim", function(object, ...)
npred.vlm(object, ...))
@@ -451,7 +454,6 @@ setMethod("npred", "rcam", function(object, ...)
-
hatvaluesvlm <- function(model,
type = c("diagonal", "matrix", "centralBlocks"), ...) {
@@ -523,8 +525,6 @@ hatvaluesvlm <- function(model,
}
-
-
if (!isGeneric("hatvalues"))
setGeneric("hatvalues", function(model, ...)
standardGeneric("hatvalues"), package = "VGAM")
@@ -540,7 +540,7 @@ setMethod("hatvalues", "qrrvglm", function(model, ...)
hatvaluesvlm(model, ...))
setMethod("hatvalues", "cao", function(model, ...)
hatvaluesvlm(model, ...))
-setMethod("hatvalues", "rcam", function(model, ...)
+setMethod("hatvalues", "rcim", function(model, ...)
hatvaluesvlm(model, ...))
@@ -614,7 +614,7 @@ setMethod("hatplot", "qrrvglm", function(model, ...)
hatplot.vlm(model, ...))
setMethod("hatplot", "cao", function(model, ...)
hatplot.vlm(model, ...))
-setMethod("hatplot", "rcam", function(model, ...)
+setMethod("hatplot", "rcim", function(model, ...)
hatplot.vlm(model, ...))
@@ -685,7 +685,7 @@ dfbetavlm <-
control = new.control,
criterion = new.control$criterion, # "coefficients",
qr.arg = FALSE,
- constraints = constraints(model, type = "lm"),
+ constraints = constraints(model, type = "term"),
extra = model at extra,
Terms = Terms.zz,
function.name = "vglm")
@@ -720,11 +720,48 @@ setMethod("dfbeta", "qrrvglm", function(model, ...)
dfbetavlm(model, ...))
setMethod("dfbeta", "cao", function(model, ...)
dfbetavlm(model, ...))
-setMethod("dfbeta", "rcam", function(model, ...)
+setMethod("dfbeta", "rcim", function(model, ...)
dfbetavlm(model, ...))
+hatvaluesbasic <- function(X_vlm,
+ diagWm,
+ M = 1) {
+
+
+
+ if (M > 1)
+ stop("currently argument 'M' must be 1")
+
+ nn <- nrow(X_vlm)
+ ncol_X_vlm = ncol(X_vlm)
+
+ XtW = t(c(diagWm) * X_vlm)
+
+
+ UU <- sqrt(diagWm) # Only for M == 1
+ UU.X_vlm <- UU * X_vlm
+
+ qrSlot <- qr(UU.X_vlm)
+ Rmat <- qr.R(qrSlot)
+
+ rinv = diag(ncol_X_vlm)
+ rinv = backsolve(Rmat, rinv)
+
+
+ Diag.Hat <- if (FALSE) {
+ covun = rinv %*% t(rinv)
+ rhs.mat <- covun %*% XtW
+ colSums(t(X_vlm) * rhs.mat)
+ } else {
+ mymat <- X_vlm %*% rinv
+ rowSums(diagWm * mymat^2)
+ }
+ Diag.Hat
+}
+
+
diff --git a/R/nobs.R b/R/nobs.R
index d243d60..583b3b3 100644
--- a/R/nobs.R
+++ b/R/nobs.R
@@ -61,7 +61,7 @@ setMethod("nobs", "vlm",
# ======================================================================
# 20110711
# Here is the 'nvar' methods functions.
-# Tricky for "vgam", "rrvglm", "qrrvglm", "cao", "rcam" objects?
+# Tricky for "vgam", "rrvglm", "qrrvglm", "cao", "rcim" objects?
nvar.vlm <- function(object, type = c("vlm", "lm"), ...) {
@@ -159,18 +159,18 @@ nvar.cao <- function(object, type = c("cao", "zz"), ...) {
-nvar.rcam <- function(object, type = c("rcam", "zz"), ...) {
+nvar.rcim <- function(object, type = c("rcim", "zz"), ...) {
# 20110711
# Uses the effective dof, or edof, or edf zz??
if(mode(type) != "character" && mode(type) != "name")
type <- as.character(substitute(type))
type <- match.arg(type,
- c("rcam", "zz"))[1]
+ c("rcim", "zz"))[1]
- stop("function nvar.rcam() has not been written yet")
+ stop("function nvar.rcim() has not been written yet")
- if (type == "rcam") {
+ if (type == "rcim") {
object at misc$p
} else {
object at misc$ncol_X_vlm
@@ -216,9 +216,9 @@ setMethod("nvar", "cao",
-setMethod("nvar", "rcam",
+setMethod("nvar", "rcim",
function(object, ...)
- nvar.rcam(object, ...))
+ nvar.rcim(object, ...))
# ======================================================================
diff --git a/R/plot.vglm.q b/R/plot.vglm.q
index ae90274..2e18dbe 100644
--- a/R/plot.vglm.q
+++ b/R/plot.vglm.q
@@ -105,11 +105,13 @@ plotvgam = function(x, newdata = NULL, y = NULL, residuals = NULL, rugplot = TRU
ylim.scale <- function(ylim, scale = 0) {
- if (length(ylim) != 2 || ylim[2] < ylim[1])
- stop("error in 'ylim'")
- try <- ylim[2] - ylim[1]
- if (try > scale) ylim else
- c(ylim[1]+ylim[2]-scale, ylim[1]+ylim[2]+scale) / 2
+ if (length(ylim) != 2 ||
+ ylim[2] < ylim[1])
+ stop("error in 'ylim'")
+ try <- ylim[2] - ylim[1]
+ if (try > scale) ylim else
+ c(ylim[1] + ylim[2] - scale,
+ ylim[1] + ylim[2] + scale) / 2
}
@@ -831,42 +833,43 @@ setMethod("plot", "vgam",
plotqrrvglm = function(object,
- rtype = c("pearson", "response", "deviance", "working"),
+ rtype = c("response", "pearson", "deviance", "working"),
ask = FALSE,
main = paste(Rtype, "residuals vs latent variable(s)"),
xlab = "Latent Variable",
ITolerances = object at control$EqualTolerances,
...) {
- M = object at misc$M
- n = object at misc$n
- Rank = object at control$Rank
- Coef.object = Coef(object, ITolerances = ITolerances)
- rtype <- match.arg(rtype, c("pearson", "response", "deviance", "working"))[1]
- res = resid(object, type=rtype)
-
- my.ylab = if (length(object at misc$ynames)) object at misc$ynames else
- rep(" ", len=M)
- Rtype = switch(rtype, pearson = "Pearson", response = "Response",
- deviance = "Deviance", working = "Working")
-
- done = 0
- for(rr in 1:Rank)
- for(ii in 1:M) {
- plot(Coef.object at lv[,rr], res[,ii],
- xlab=paste(xlab, if (Rank == 1) "" else rr, sep = ""),
- ylab=my.ylab[ii],
- main = main, ...)
- done = done + 1
- if (done >= prod(par()$mfrow) && ask && done != Rank*M) {
- done = 0
- readline("Hit return for the next plot: ")
- }
- }
- object
+ M <- object at misc$M
+ n <- object at misc$n
+ Rank <- object at control$Rank
+ Coef.object <- Coef(object, ITolerances = ITolerances)
+ rtype <- match.arg(rtype,
+ c("response", "pearson", "deviance", "working"))[1]
+ res <- resid(object, type = rtype)
+
+ my.ylab <- if (length(object at misc$ynames)) object at misc$ynames else
+ rep(" ", len = M)
+ Rtype <- switch(rtype, pearson = "Pearson", response = "Response",
+ deviance = "Deviance", working = "Working")
+
+ done <- 0
+ for(rr in 1:Rank)
+ for(ii in 1:M) {
+ plot(Coef.object at lv[,rr], res[,ii],
+ xlab = paste(xlab, if (Rank == 1) "" else rr, sep = ""),
+ ylab = my.ylab[ii],
+ main = main, ...)
+ done <- done + 1
+ if (done >= prod(par()$mfrow) && ask && done != Rank*M) {
+ done <- 0
+ readline("Hit return for the next plot: ")
+ }
+ }
+ object
}
setMethod("plot", "qrrvglm", function(x, y, ...)
- invisible(plotqrrvglm(object=x, ...)))
+ invisible(plotqrrvglm(object = x, ...)))
diff --git a/R/predict.vgam.q b/R/predict.vgam.q
index 995b7e2..d2569c4 100644
--- a/R/predict.vgam.q
+++ b/R/predict.vgam.q
@@ -6,13 +6,13 @@
-predict.vgam <- function(object, newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit = FALSE, deriv.arg=0, terms.arg=NULL,
+predict.vgam <- function(object, newdata = NULL,
+ type = c("link", "response", "terms"),
+ se.fit = FALSE, deriv.arg = 0, terms.arg = NULL,
raw = FALSE,
- all = TRUE, offset=0,
+ all = TRUE, offset = 0,
untransform = FALSE,
- dispersion=NULL, ...)
+ dispersion = NULL, ...)
{
if (missing(newdata)) {
newdata <- NULL
@@ -79,8 +79,9 @@ predict.vgam <- function(object, newdata=NULL,
} else {
answer = object at predictors
}
- if (untransform) return(untransformVGAM(object, answer)) else
- return(answer)
+ if (untransform)
+ return(untransformVGAM(object, answer)) else
+ return(answer)
}
} else
if (type=="response") {
@@ -96,14 +97,14 @@ predict.vgam <- function(object, newdata=NULL,
}
predictor <- predict.vlm(object,
- type="terms",
- se.fit=se.fit,
- terms.arg=terms.arg,
- raw=raw,
- all=all, offset=offset,
- dispersion=dispersion, ...) # deriv.arg=deriv.arg,
+ type = "terms",
+ se.fit = se.fit,
+ terms.arg = terms.arg,
+ raw = raw,
+ all = all, offset = offset,
+ dispersion = dispersion, ...) # deriv.arg = deriv.arg,
- newdata <- model.matrixvlm(object, type="lm")
+ newdata <- model.matrixvlm(object, type = "lm")
} else {
@@ -112,12 +113,12 @@ predict.vgam <- function(object, newdata=NULL,
predictor <- predict.vlm(object, newdata,
- type=temp.type,
- se.fit=se.fit,
- terms.arg=terms.arg,
- raw=raw,
- all=all, offset=offset,
- dispersion=dispersion, ...) # deriv.arg=deriv.arg,
+ type = temp.type,
+ se.fit = se.fit,
+ terms.arg = terms.arg,
+ raw = raw,
+ all = all, offset = offset,
+ dispersion = dispersion, ...) # deriv.arg = deriv.arg,
}
@@ -174,8 +175,8 @@ predict.vgam <- function(object, newdata=NULL,
rawMat <- predictvsmooth.spline.fit(
object at Bspline[[ii]],
- x=xx,
- deriv=deriv.arg)$y
+ x = xx,
+ deriv = deriv.arg)$y
eta.mat <- if (raw) rawMat else (rawMat %*% t(Blist[[ii]]))
@@ -242,7 +243,7 @@ predict.vgam <- function(object, newdata=NULL,
}
}
if (se.fit) {
- return(list(fit=fv, se.fit=fv*NA))
+ return(list(fit = fv, se.fit = fv*NA))
} else {
return(fv)
}
@@ -280,7 +281,7 @@ predict.vgam <- function(object, newdata=NULL,
matrix(ans, ncol = lindex, byrow = TRUE) else 0
} else {
predictor[,index] <- if (deriv.arg==1)
- matrix(ans, ncol=lindex, byrow = TRUE) else 0
+ matrix(ans, ncol = lindex, byrow = TRUE) else 0
}
}
} else
diff --git a/R/predict.vglm.q b/R/predict.vglm.q
index 0e11ca2..c7d592f 100644
--- a/R/predict.vglm.q
+++ b/R/predict.vglm.q
@@ -6,14 +6,14 @@
-predictvglm = function(object,
- newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit=FALSE,
- deriv=0,
- dispersion=NULL,
- untransform=FALSE,
- extra=object at extra, ...) {
+predictvglm <- function(object,
+ newdata = NULL,
+ type = c("link", "response", "terms"),
+ se.fit = FALSE,
+ deriv = 0,
+ dispersion = NULL,
+ untransform = FALSE,
+ extra = object at extra, ...) {
na.act = object at na.action
object at na.action = list()
@@ -29,7 +29,7 @@ predictvglm = function(object,
if (untransform && (type!="link" || se.fit || deriv != 0))
stop("argument 'untransform=TRUE' only if 'type=\"link\", ",
- "se.fit=FALSE, deriv=0'")
+ "se.fit = FALSE, deriv=0'")
@@ -38,7 +38,7 @@ predictvglm = function(object,
switch(type,
response = {
warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
- "together; setting 'se.fit=FALSE'")
+ "together; setting 'se.fit = FALSE'")
se.fit = FALSE
predictor = predict.vlm(object, newdata=newdata,
type=type, se.fit=se.fit,
@@ -135,13 +135,13 @@ setMethod("predict", "vglm", function(object, ...)
-predict.rrvglm = function(object,
- newdata=NULL,
- type=c("link", "response", "terms"),
- se.fit=FALSE,
- deriv=0,
- dispersion=NULL,
- extra=object at extra, ...) {
+predict.rrvglm <- function(object,
+ newdata = NULL,
+ type = c("link", "response", "terms"),
+ se.fit = FALSE,
+ deriv = 0,
+ dispersion = NULL,
+ extra = object at extra, ...) {
if (se.fit) {
stop("11/8/03; predict.rrvglm(..., se.fit=TRUE) not complete yet")
@@ -149,7 +149,7 @@ predict.rrvglm = function(object,
switch(type,
response = {
warning("'type=\"response\"' and 'se.fit=TRUE' not valid ",
- "together; setting 'se.fit=FALSE'")
+ "together; setting 'se.fit = FALSE'")
se.fit = FALSE
predictor = predict.vlm(object, newdata=newdata,
type=type, se.fit=se.fit,
@@ -199,25 +199,85 @@ setMethod("predict", "rrvglm", function(object, ...)
-untransformVGAM = function(object, pred) {
- M = object at misc$M
- Links = object at misc$link
- if (length(Links) != M && length(Links) != 1)
- stop("cannot obtain the link functions to untransform the object")
- upred = pred
- earg = object at misc$earg
- for(ii in 1:M) {
- TTheta = pred[,ii] # Transformed theta
- newcall = paste(Links[ii], "(theta=TTheta, earg=earg, inverse=TRUE)", sep="")
- newcall = parse(text=newcall)[[1]]
- Theta = eval(newcall) # Theta, the untransformed parameter
- upred[,ii] = Theta
- }
- dmn2 = if (length(names(object at misc$link))) names(object at misc$link) else {
- if (length(object at misc$parameters)) object at misc$parameters else NULL
- }
- dimnames(upred) = list(dimnames(upred)[[1]], dmn2)
- upred
+untransformVGAM <- function(object, pred) {
+ M <- object at misc$M
+ Links <- object at misc$link
+ if (length(Links) != M && length(Links) != 1)
+ stop("cannot obtain the link functions to untransform the object")
+
+ upred <- pred
+ earg <- object at misc$earg
+
+
+
+
+
+
+ LINK <- object at misc$link # link.names # This should be a character vector.
+ EARG <- object at misc$earg # This could be a NULL
+ if (is.null(EARG))
+ EARG <- list(theta = NULL)
+ if (!is.list(EARG))
+ stop("the 'earg' component of 'object at misc' must be a list")
+
+ if (length(LINK) != M &&
+ length(LINK) != 1)
+ stop("cannot obtain the link functions to untransform 'object'")
+
+
+
+ if (!is.character(LINK))
+ stop("the 'link' component of 'object at misc' should ",
+ "be a character vector")
+
+ learg <- length(EARG)
+ llink <- length(LINK)
+ if (llink != learg)
+ stop("the 'earg' component of 'object at misc' should ",
+ "be a list of length ", learg)
+
+
+ level1 <- length(EARG) > 3 &&
+ length(intersect(names(EARG),
+ c("theta", "inverse", "deriv", "short", "tag"))) > 3
+ if (level1)
+ EARG <- list(oneOnly = EARG)
+
+
+
+ learg <- length(EARG)
+
+
+
+
+
+ for(ii in 1:M) {
+ TTheta <- pred[, ii] # Transformed theta
+
+
+ use.earg <-
+ if (llink == 1) EARG[[1]] else EARG[[ii]]
+ function.name <-
+ if (llink == 1) LINK else LINK[ii]
+
+
+ use.earg[["inverse"]] <- TRUE # New
+ use.earg[["theta"]] <- TTheta # New
+ Theta <- do.call(function.name, use.earg)
+
+
+
+
+
+
+ upred[, ii] <- Theta
+ }
+
+ dmn2 <- if (length(names(object at misc$link))) names(object at misc$link) else {
+ if (length(object at misc$parameters)) object at misc$parameters else NULL
+ }
+ dimnames(upred) <- list(dimnames(upred)[[1]], dmn2)
+ upred
}
diff --git a/R/predict.vlm.q b/R/predict.vlm.q
index 1c099e9..5e05800 100644
--- a/R/predict.vlm.q
+++ b/R/predict.vlm.q
@@ -8,10 +8,10 @@
predict.vlm = function(object,
- newdata=NULL,
- type=c("response","terms"),
+ newdata = NULL,
+ type = c("response", "terms"),
se.fit = FALSE, scale = NULL,
- terms.arg=NULL,
+ terms.arg = NULL,
raw=FALSE,
dispersion = NULL, ...)
{
@@ -20,7 +20,7 @@ predict.vlm = function(object,
if (mode(type) != "character" && mode(type) != "name")
type = as.character(substitute(type))
- type = match.arg(type, c("response","terms"))[1]
+ type = match.arg(type, c("response", "terms"))[1]
na.act = object at na.action
object at na.action = list()
diff --git a/R/qtplot.q b/R/qtplot.q
index 51f0d43..0f8b80e 100644
--- a/R/qtplot.q
+++ b/R/qtplot.q
@@ -71,6 +71,7 @@ qtplot.lms.yjn <- function(percentiles = c(25,50,75),
answer
}
+
qtplot.default <- function(object, ...) {
warning("no methods function. Returning the object")
@@ -84,7 +85,7 @@ qtplot.default <- function(object, ...) {
LL <- length(object at family@vfamily)
newcall = paste("qtplot.", object at family@vfamily[LL],
"(object, ...)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
if (Attach) {
object at post$qtplot = eval(newcall)
@@ -104,33 +105,46 @@ qtplot.lmscreg <- function(object,
lp <- length(percentiles)
if (same) {
- fitted.values <- if (!length(newdata)) object at fitted.values else {
- predict(object, newdata=newdata, type = "response")
- }
+ fitted.values <- if (!length(newdata))
+ object at fitted.values else {
+ predict(object, newdata = newdata, type = "response")
+ }
fitted.values <- as.matrix(fitted.values)
} else {
if (!is.numeric(percentiles))
stop("'percentiles' must be specified")
- eta <- if (length(newdata)) predict(object, newdata=newdata, type = "link") else
- object at predictors
- eta <- eta2theta(eta, object at misc$link) # Now lambda, mu, sigma
+ eta <- if (length(newdata))
+ predict(object, newdata = newdata, type = "link") else
+ object at predictors
+
+
+ if (!length(double.check.earg <- object at misc$earg))
+ double.check.earg <- list(theta = NULL)
+ eta <- eta2theta(eta, link = object at misc$link,
+ earg = double.check.earg) # lambda, mu, sigma
+
+
if (!is.logical(expectiles <- object at misc$expectiles)) {
expectiles <- FALSE
}
newcall = paste(if (expectiles) "explot." else "qtplot.",
- object at family@vfamily[1], "(percentiles = percentiles",
- ", eta = eta, yoffset=object at misc$yoffset)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ object at family@vfamily[1],
+ "(percentiles = percentiles",
+ ", eta = eta, yoffset=object at misc$yoffset)",
+ sep = "")
+ newcall = parse(text = newcall)[[1]]
fitted.values = as.matrix( eval(newcall) )
- dimnames(fitted.values) <- list(dimnames(eta)[[1]],
- paste(as.character(percentiles), "%", sep = ""))
+ dimnames(fitted.values) <-
+ list(dimnames(eta)[[1]],
+ paste(as.character(percentiles), "%", sep = ""))
}
if (plot.it) {
- plotqtplot.lmscreg(fitted.values = fitted.values, object = object,
+ plotqtplot.lmscreg(fitted.values = fitted.values,
+ object = object,
newdata = newdata,
lp = lp,
percentiles = percentiles, ...)
@@ -183,7 +197,7 @@ plotqtplot.lmscreg <- function(fitted.values, object,
if (!is.numeric(ylim))
ylim <- c(min(fred), max(fred))
matplot(x=xx, y=fred,
- xlab=xlab, ylab=ylab, type = "n",
+ xlab = xlab, ylab = ylab, type = "n",
xlim=xlim, ylim=ylim, ...)
}
@@ -217,7 +231,7 @@ plotqtplot.lmscreg <- function(fitted.values, object,
if (!is.numeric(ylim))
ylim <- c(min(fitted.values), max(fitted.values))
matplot(x=xx, y=fitted.values,
- xlab=xlab, ylab=ylab, type = "n",
+ xlab = xlab, ylab = ylab, type = "n",
xlim=xlim, ylim=ylim, col = pcol.arg)
}
if (y && length(object at y))
@@ -235,16 +249,18 @@ plotqtplot.lmscreg <- function(fitted.values, object,
temp <- temp[sort.list(temp[, 1]),]
index <- !duplicated(temp[, 1])
if (spline.fit) {
- lines(spline(temp[index, 1], temp[index, 2]),
- lty = llty.arg[ii], col = lcol.arg[ii], err=-1, lwd = llwd.arg[ii])
+ lines(spline(temp[index, 1], temp[index, 2]),
+ lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
+ lwd = llwd.arg[ii])
} else {
lines(temp[index, 1], temp[index, 2],
- lty = llty.arg[ii], col = lcol.arg[ii], err=-1, lwd = llwd.arg[ii])
+ lty = llty.arg[ii], col = lcol.arg[ii], err = -1,
+ lwd = llwd.arg[ii])
}
if (label)
text(par()$usr[2], temp[nrow(temp), 2],
paste( percentiles[ii], "%", sep = ""),
- adj=tadj, col=tcol.arg[ii], err=-1)
+ adj = tadj, col = tcol.arg[ii], err = -1)
}
invisible(fitted.values)
@@ -252,16 +268,17 @@ plotqtplot.lmscreg <- function(fitted.values, object,
if (TRUE) {
- if (!isGeneric("qtplot"))
- setGeneric("qtplot", function(object, ...) standardGeneric("qtplot"))
+ if (!isGeneric("qtplot"))
+ setGeneric("qtplot", function(object, ...)
+ standardGeneric("qtplot"))
- setMethod("qtplot", signature(object = "vglm"),
- function(object, ...)
- invisible(qtplot.vglm(object, ...)))
- setMethod("qtplot", signature(object = "vgam"),
- function(object, ...)
- invisible(qtplot.vglm(object, ...)))
+ setMethod("qtplot", signature(object = "vglm"),
+ function(object, ...)
+ invisible(qtplot.vglm(object, ...)))
+ setMethod("qtplot", signature(object = "vgam"),
+ function(object, ...)
+ invisible(qtplot.vglm(object, ...)))
}
@@ -274,8 +291,8 @@ if (TRUE) {
newcall = paste("qtplot.", object at family@vfamily[1],
- "(object=object, ... )", sep = "")
- newcall = parse(text=newcall)[[1]]
+ "(object = object, ... )", sep = "")
+ newcall = parse(text = newcall)[[1]]
eval(newcall)
}
@@ -344,8 +361,8 @@ qtplot.gumbel <-
object at s.xargument else names(object at assign)[2]
if (!add.arg)
- matplot(x=xx, y=cbind(object at y, fitted.values), main=main,
- xlab=xlab, ylab=ylab, type = "n", ...)
+ matplot(x=xx, y=cbind(object at y, fitted.values), main = main,
+ xlab = xlab, ylab = ylab, type = "n", ...)
if (y.arg) {
matpoints(x=xx, y=object at y, pch = pch, col = pcol.arg)
@@ -370,7 +387,7 @@ qtplot.gumbel <-
if (label) {
mylabel = (dimnames(answer$fitted)[[2]])[ii]
text(par()$usr[2], temp[nrow(temp), 2],
- mylabel, adj=tadj, col=tcol.arg[ii], err=-1)
+ mylabel, adj=tadj, col=tcol.arg[ii], err = -1)
}
}
@@ -451,7 +468,7 @@ deplot.default <- function(object, ...) {
LL <- length(object at family@vfamily)
newcall = paste("deplot.", object at family@vfamily[LL],
"(object, ...)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
if (Attach) {
object at post$deplot = eval(newcall)
@@ -476,16 +493,22 @@ deplot.default <- function(object, ...) {
ii <- if (object at misc$nonparametric)
slot(object, "s.xargument") else NULL
if (length(ii) && any(logic.vec <-
- names(slot(object, "s.xargument"))==var1name))
- names(newdata) <- ii[logic.vec] # should be the first one
+ names(slot(object, "s.xargument")) == var1name))
+ names(newdata) <- ii[logic.vec] # should be the first one
}
- eta0 = if (length(newdata)) predict(object, newdata) else predict(object)
- eta0 <- eta2theta(eta0, object at misc$link) # lambda, mu, sigma
+ eta0 <- if (length(newdata)) predict(object, newdata) else
+ predict(object)
+
+ if (!length(double.check.earg <- object at misc$earg))
+ double.check.earg <- list(theta = NULL)
+ eta0 <- eta2theta(eta0, link = object at misc$link,
+ earg = double.check.earg) # lambda, mu, sigma
newcall = paste("deplot.", object at family@vfamily[1],
- "(object, newdata, y.arg=y.arg, eta0 = eta0)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ "(object, newdata, y.arg = y.arg, eta0 = eta0)",
+ sep = "")
+ newcall = parse(text = newcall)[[1]]
answer = eval(newcall)
if (plot.it)
@@ -514,7 +537,7 @@ plotdeplot.lmscreg <- function(answer,
if (!is.numeric(ylim))
ylim <- c(min(yvec), max(yvec))
matplot(x=xx, y=yvec,
- xlab=xlab, ylab=ylab, type = "n",
+ xlab = xlab, ylab = ylab, type = "n",
xlim=xlim, ylim=ylim, ...)
}
@@ -522,7 +545,7 @@ plotdeplot.lmscreg <- function(answer,
temp <- temp[sort.list(temp[, 1]),]
index <- !duplicated(temp[, 1])
lines(temp[index, 1], temp[index, 2],
- lty = llty.arg, col=col.arg, err=-1, lwd = llwd.arg)
+ lty = llty.arg, col = col.arg, err = -1, lwd = llwd.arg)
invisible(answer)
}
@@ -565,7 +588,7 @@ if (TRUE) {
LL <- length(object at family@vfamily)
newcall = paste("cdf.", object at family@vfamily[LL],
"(object, newdata, ...)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
if (Attach) {
object at post$cdf = eval(newcall)
@@ -581,18 +604,22 @@ if (TRUE) {
- if (!length(newdata))
- return(object at post$cdf)
+ if (!length(newdata))
+ return(object at post$cdf)
- eta0 = if (length(newdata)) predict(object, newdata) else predict(object)
- eta0 <- eta2theta(eta0, link=object at misc$link) # lambda, mu, sigma
+ eta0 = if (length(newdata)) predict(object, newdata) else predict(object)
- y = vgety(object, newdata) # Includes yoffset
+ if (!length(double.check.earg <- object at misc$earg))
+ double.check.earg <- list(theta = NULL)
+ eta0 <- eta2theta(eta0, link = object at misc$link,
+ earg = double.check.earg) # lambda, mu, sigma
- newcall = paste("cdf.", object at family@vfamily[1],
- "(y, eta0, ... )", sep = "")
- newcall = parse(text=newcall)[[1]]
- eval(newcall)
+ y = vgety(object, newdata) # Includes yoffset
+
+ newcall = paste("cdf.", object at family@vfamily[1],
+ "(y, eta0, ... )", sep = "")
+ newcall = parse(text = newcall)[[1]]
+ eval(newcall)
}
@@ -652,7 +679,7 @@ vgety = function(object, newdata = NULL) {
LL <- length(object at family@vfamily)
newcall = paste("rlplot.", object at family@vfamily[LL],
"(object, ...)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
if (Attach) {
object at post$rlplot = eval(newcall)
@@ -678,18 +705,20 @@ vgety = function(object, newdata = NULL) {
rlplot.egev <-
rlplot.gev <-
- function(object, plot.it = TRUE,
- probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
- add.arg = FALSE,
- xlab = "Return Period",ylab = "Return Level", main = "Return Level Plot",
- pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
- llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
- slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
- ylim = NULL,
- log = TRUE,
- CI = TRUE,
- epsilon = 1.0e-05,
- ...)
+ function(object, plot.it = TRUE,
+ probability = c((1:9)/100, (1:9)/10, 0.95, 0.99, 0.995, 0.999),
+ add.arg = FALSE,
+ xlab = "Return Period",
+ ylab = "Return Level",
+ main = "Return Level Plot",
+ pch = par()$pch, pcol.arg = par()$col, pcex = par()$cex,
+ llty.arg = par()$lty, lcol.arg = par()$col, llwd.arg = par()$lwd,
+ slty.arg = par()$lty, scol.arg = par()$col, slwd.arg = par()$lwd,
+ ylim = NULL,
+ log = TRUE,
+ CI = TRUE,
+ epsilon = 1.0e-05,
+ ...)
{
log.arg = log
rm(log)
@@ -722,7 +751,7 @@ rlplot.gev <-
plot(log(1/yp), zp, log = "", type = "n",
ylim = if (length(ylim)) ylim else
c(min(c(ydata, zp)), max(c(ydata, zp))),
- xlab=xlab, ylab=ylab, main=main, ...)
+ xlab = xlab, ylab = ylab, main = main, ...)
points(log(-1/log((1:n)/(n+1))), ydata, col = pcol.arg,
pch = pch, cex = pcex)
lines(log(1/yp), zp,
@@ -732,7 +761,7 @@ rlplot.gev <-
plot(1/yp, zp, log = "x", type = "n",
ylim = if (length(ylim)) ylim else
c(min(c(ydata, zp)), max(c(ydata, zp))),
- xlab=xlab, ylab=ylab, main=main, ...)
+ xlab = xlab, ylab = ylab, main = main, ...)
points(-1/log((1:n)/(n+1)), ydata, col = pcol.arg,
pch = pch, cex = pcex)
lines(1/yp, zp, lwd = llwd.arg, col = lcol.arg, lty = llty.arg)
@@ -750,12 +779,12 @@ rlplot.gev <-
newcall = paste(Links[ii],
"(theta=TTheta, earg=use.earg, inverse = TRUE)",
sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
uteta = eval(newcall) # Theta, the untransformed parameter
uteta = uteta + epsilon # perturb it
newcall = paste(Links[ii],
"(theta=uteta, earg=use.earg)", sep = "")
- newcall = parse(text=newcall)[[1]]
+ newcall = parse(text = newcall)[[1]]
teta = eval(newcall) # The transformed parameter
peta = eta
peta[, ii] = teta
diff --git a/R/residuals.vlm.q b/R/residuals.vlm.q
index 5055212..55c3f00 100644
--- a/R/residuals.vlm.q
+++ b/R/residuals.vlm.q
@@ -195,63 +195,63 @@ residualsvglm <- function(object,
residualsqrrvglm <- function(object,
type = c("response"),
- matrix.arg=TRUE)
+ matrix.arg = TRUE)
{
- if (mode(type) != "character" && mode(type) != "name")
- type <- as.character(substitute(type))
- type <- match.arg(type,
- c("response"))[1]
-
- na.act = object at na.action
- object at na.action = list()
-
- pooled.weight <- object at misc$pooled.weight
- if (is.null(pooled.weight))
- pooled.weight <- FALSE
-
- answer =
- switch(type,
- working = if (pooled.weight) NULL else object at residuals,
- pearson = {
- stop("have not programmed pearson resids yet")
- },
- deviance = {
- stop("have not programmed deviance resids yet")
- },
- ldot = {
- stop("have not programmed ldot resids yet")
- },
- response = {
- y <- object at y
- mu <- fitted(object)
-
- true.mu <- object at misc$true.mu
- if (is.null(true.mu))
- true.mu <- TRUE
-
- ans <- if (true.mu) y - mu else NULL
-
-
- if (!matrix.arg && length(ans)) {
- if (ncol(ans) == 1) {
- names.ans = dimnames(ans)[[1]]
- ans = c(ans)
- names(ans) = names.ans
- ans
- } else {
- warning("ncol(ans) is not 1")
- ans
- }
- } else ans
- })
-
- if (length(answer) && length(na.act)) {
- napredict(na.act[[1]], answer)
- } else {
- answer
- }
+ if (mode(type) != "character" && mode(type) != "name")
+ type <- as.character(substitute(type))
+ type <- match.arg(type,
+ c("response"))[1]
+
+ na.act = object at na.action
+ object at na.action = list()
+
+ pooled.weight <- object at misc$pooled.weight
+ if (is.null(pooled.weight))
+ pooled.weight <- FALSE
+
+ answer =
+ switch(type,
+ working = if (pooled.weight) NULL else object at residuals,
+ pearson = {
+ stop("have not programmed pearson resids yet")
+ },
+ deviance = {
+ stop("have not programmed deviance resids yet")
+ },
+ ldot = {
+ stop("have not programmed ldot resids yet")
+ },
+ response = {
+ y <- object at y
+ mu <- fitted(object)
+
+ true.mu <- object at misc$true.mu
+ if (is.null(true.mu))
+ true.mu <- TRUE
+
+ ans <- if (true.mu) y - mu else NULL
+
+
+ if (!matrix.arg && length(ans)) {
+ if (ncol(ans) == 1) {
+ names.ans = dimnames(ans)[[1]]
+ ans = c(ans)
+ names(ans) = names.ans
+ ans
+ } else {
+ warning("ncol(ans) is not 1")
+ ans
+ }
+ } else ans
+ })
+
+ if (length(answer) && length(na.act)) {
+ napredict(na.act[[1]], answer)
+ } else {
+ answer
+ }
}
diff --git a/R/rrvglm.control.q b/R/rrvglm.control.q
index e207d70..1198712 100644
--- a/R/rrvglm.control.q
+++ b/R/rrvglm.control.q
@@ -40,23 +40,23 @@ rrvglm.control = function(Rank = 1,
if (!is.Numeric(Rank, positive = TRUE,
allowable.length = 1, integer.valued = TRUE))
- stop("bad input for 'Rank'")
+ stop("bad input for 'Rank'")
if (!is.Numeric(Alpha, positive = TRUE,
allowable.length = 1) || Alpha > 1)
- stop("bad input for 'Alpha'")
+ stop("bad input for 'Alpha'")
if (!is.Numeric(Bestof, positive = TRUE,
allowable.length = 1, integer.valued = TRUE))
- stop("bad input for 'Bestof'")
+ stop("bad input for 'Bestof'")
if (!is.Numeric(SD.Ainit, positive = TRUE,
allowable.length = 1))
- stop("bad input for 'SD.Ainit'")
+ stop("bad input for 'SD.Ainit'")
if (!is.Numeric(SD.Cinit, positive = TRUE,
allowable.length = 1))
- stop("bad input for 'SD.Cinit'")
+ stop("bad input for 'SD.Cinit'")
if (!is.Numeric(Etamat.colmax, positive = TRUE,
allowable.length = 1) ||
Etamat.colmax < Rank)
- stop("bad input for 'Etamat.colmax'")
+ stop("bad input for 'Etamat.colmax'")
if (length(szero) &&
(any(round(szero) != szero) ||
@@ -76,21 +76,26 @@ rrvglm.control = function(Rank = 1,
stop("Quadratic model can only be fitted using the derivative algorithm")
if (Corner && (Svd.arg || Uncorrelated.lv || length(Wmat)))
- stop("cannot have Corner = TRUE and either Svd = TRUE or Uncorrelated.lv = TRUE or Wmat")
+ stop("cannot have 'Corner = TRUE' and either 'Svd = TRUE' or ",
+ "'Uncorrelated.lv = TRUE' or Wmat")
if (Corner && length(intersect(szero, Index.corner)))
- stop("cannot have szero and Index.corner having common values")
+ stop("cannot have 'szero' and 'Index.corner' having ",
+ "common values")
if (length(Index.corner) != Rank)
- stop("length(Index.corner) != Rank")
+ stop("length(Index.corner) != Rank")
- if (!is.logical(checkwz) || length(checkwz) != 1)
- stop("bad input for 'checkwz'")
- if (!is.Numeric(wzepsilon, allowable.length = 1, positive = TRUE))
- stop("bad input for 'wzepsilon'")
+ if (!is.logical(checkwz) ||
+ length(checkwz) != 1)
+ stop("bad input for 'checkwz'")
+
+ if (!is.Numeric(wzepsilon, allowable.length = 1,
+ positive = TRUE))
+ stop("bad input for 'wzepsilon'")
if (class(Norrr) != "formula" && !is.null(Norrr))
- stop("argument 'Norrr' should be a formula or a NULL")
+ stop("argument 'Norrr' should be a formula or a NULL")
ans =
c(vglm.control(trace = trace, ...),
@@ -105,7 +110,9 @@ rrvglm.control = function(Rank = 1,
Cinit = Cinit,
Index.corner = Index.corner,
Norrr = Norrr,
- Corner = Corner, Uncorrelated.lv = Uncorrelated.lv, Wmat = Wmat,
+ Corner = Corner,
+ Uncorrelated.lv = Uncorrelated.lv,
+ Wmat = Wmat,
OptimizeWrtC = TRUE, # OptimizeWrtC,
Quadratic = FALSE, # A constant now, here.
SD.Ainit = SD.Ainit,
diff --git a/R/s.vam.q b/R/s.vam.q
index c5d6356..f0da59a 100644
--- a/R/s.vam.q
+++ b/R/s.vam.q
@@ -6,11 +6,14 @@
+
+
+
s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
- bf.epsilon=0.001, trace=FALSE, se.fit = TRUE,
+ bf.epsilon = 0.001, trace = FALSE, se.fit = TRUE,
X_vlm_save, Blist, ncolBlist, M, qbig, Umat,
- all.knots=FALSE, nk=NULL,
- sf.only=FALSE)
+ all.knots = FALSE, nk = NULL,
+ sf.only = FALSE)
{
nwhich <- names(which)
@@ -20,7 +23,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
if (!length(smooth.frame$first)) {
- data <- smooth.frame[, nwhich, drop=FALSE]
+ data <- smooth.frame[, nwhich, drop = FALSE]
smooth.frame <- vgam.match(data, all.knots=all.knots, nk=nk)
smooth.frame$first <- FALSE # No longer first for next time
@@ -38,29 +41,29 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
temp <- sparv[[ii]]
if (!is.numeric(temp) || any(temp < 0)) {
- stop("spar cannot be negative or non-numeric")
+ stop("spar cannot be negative or non-numeric")
}
if (length(temp) > ncolBlist[ii]) {
- warning("only the first ", ncolBlist[ii], " values of ",
- "'spar' are used for variable '", s.xargument, "'")
+ warning("only the first ", ncolBlist[ii], " values of ",
+ "'spar' are used for variable '", s.xargument, "'")
}
- sparv[[ii]] <- rep(temp, length=ncolBlist[ii]) # recycle
+ sparv[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
temp <- dfvec[[ii]]
if (!is.numeric(temp) || any(temp < 1)) {
- stop("df is non-numeric or less than 1")
+ stop("df is non-numeric or less than 1")
}
if (length(temp) > ncolBlist[ii]) {
- warning("only the first", ncolBlist[ii], "values of 'df' ",
- "are used for variable '", s.xargument, "'")
+ warning("only the first ", ncolBlist[ii], " value(s) of 'df' ",
+ "are used for variable '", s.xargument, "'")
}
- dfvec[[ii]] <- rep(temp, length=ncolBlist[ii]) # recycle
+ dfvec[[ii]] <- rep(temp, length = ncolBlist[ii]) # recycle
if (max(temp) > smooth.frame$nef[kk]-1) {
- stop("'df' value too high for variable '", s.xargument, "'")
+ stop("'df' value too high for variable '", s.xargument, "'")
}
if (any(sparv[[ii]] != 0) && any(dfvec[[ii]] != 4)) {
- stop("cannot specify both 'spar' and 'df'")
+ stop("cannot specify both 'spar' and 'df'")
}
} # End of kk loop
@@ -70,8 +73,8 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$dfvec <- dfvec # original
if (sum(smooth.frame$dfvec[smooth.frame$sparv == 0]) + pbig >
- smooth.frame$n_lm * sum(ncolBlist[nwhich])) {
- stop("too many parameters/dof for data on hand")
+ smooth.frame$n_lm * sum(ncolBlist[nwhich])) {
+ stop("too many parameters/dof for data on hand")
}
xnrow_X_vlm <- labels(X_vlm_save)[[2]]
@@ -96,6 +99,8 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$kindex = as.integer(
cumsum(c(1, 4 + smooth.frame$nknots)))
} # End of first
+
+
if (sf.only) {
return(smooth.frame)
}
@@ -137,13 +142,14 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
eps = 0.00244, # was default till R 1.3.x
maxit = 500 )
+
fit <- dotC(name="Yee_vbfa", # ---------------------------------
npetc = as.integer(c(n_lm, p_lm, length(which), se.fit, 0,
bf.maxit, qrank = 0, M, nbig = n_lm * M, pbig,
- qbig, dim2wz, dim1U, ier=0, ldk=ldk, # ldk may be unused
+ qbig, dim2wz, dim1U, ier = 0, ldk=ldk, # ldk may be unused
contr.sp$maxit, iinfo = 0
)),
- doubvec = as.double(c(bf.epsilon, resSS=0, unlist(contr.sp[1:4]))),
+ doubvec = as.double(c(bf.epsilon, resSS = 0, unlist(contr.sp[1:4]))),
as.double(x),
y = as.double(zedd), wz = as.double(wz),
dfvec = as.double(smooth.frame$dfvec),
@@ -176,10 +182,10 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
dim(fit$smomat) = dim(smomat)
dimnames(fit$smomat) = dimnames(smomat) # Needed for vgam.nlchisq
if (se.fit) {
- dim(fit$varmat) = dim(smomat)
- dimnames(fit$varmat) = dimnames(smomat)
- dim(fit$levmat) = dim(smomat)
- dimnames(fit$levmat) = dimnames(smomat)
+ dim(fit$varmat) = dim(smomat)
+ dimnames(fit$varmat) = dimnames(smomat)
+ dim(fit$levmat) = dim(smomat)
+ dimnames(fit$levmat) = dimnames(smomat)
}
@@ -189,10 +195,10 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
if (fit$npetc[14] != 0 || fit$npetc[17] != 0) {
- stop("something went wrong in the C function 'vbfa'")
+ stop("something went wrong in the C function 'vbfa'")
}
- fit$etamat = if (M > 1) matrix(fit$etamat, n_lm, M, byrow=TRUE) else
+ fit$etamat = if (M > 1) matrix(fit$etamat, n_lm, M, byrow = TRUE) else
c(fit$etamat) # May no longer be a matrix
nits <- fit$npetc[5]
qrank <- fit$npetc[7]
@@ -208,8 +214,8 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
smooth.frame$prev.dof <- fit$dfvec
if ((nits == bf.maxit) & bf.maxit > 1) {
- warning("'s.vam' convergence not obtained in ", bf.maxit,
- " iterations")
+ warning("'s.vam' convergence not obtained in ", bf.maxit,
+ " iterations")
}
R <- fit$qr[1:pbig, 1:pbig]
@@ -253,7 +259,7 @@ s.vam <- function(x, zedd, wz, smomat, which, smooth.frame, bf.maxit = 10,
names(rl$nl.df) <- smooth.frame$ndfspar
if (se.fit) {
- rl <- c(rl, list(varmat = fit$varmat))
+ rl <- c(rl, list(varmat = fit$varmat))
}
c(list(smooth.frame = smooth.frame), rl)
}
diff --git a/R/smart.R b/R/smart.R
index 32a8445..d9f02ca 100644
--- a/R/smart.R
+++ b/R/smart.R
@@ -217,8 +217,8 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
}
Aknots <- sort(c(rep(Boundary.knots, ord), knots))
if (any(outside)) {
- warning(
-"some 'x' values beyond boundary knots may cause ill-conditioned bases")
+ warning("some 'x' values beyond boundary knots may ",
+ "cause ill-conditioned bases")
derivs <- 0:degree
scalef <- gamma(1L:ord)
basis <- array(0, c(length(x), length(Aknots) - degree -
@@ -250,8 +250,11 @@ function (x, df = NULL, knots = NULL, degree = 3, intercept = FALSE,
basis <- nmat
}
dimnames(basis) <- list(nx, 1L:n.col)
- a <- list(degree = degree, knots = if (is.null(knots)) numeric(0L) else knots,
- Boundary.knots = Boundary.knots, intercept = intercept)
+ a <- list(degree = degree,
+ knots = if (is.null(knots)) numeric(0L) else knots,
+ Boundary.knots = Boundary.knots,
+ intercept = intercept,
+ Aknots = Aknots)
attributes(basis) <- c(attributes(basis), a)
class(basis) <- c("bs", "basis", "matrix")
@@ -337,8 +340,11 @@ function (x, df = NULL, knots = NULL, intercept = FALSE, Boundary.knots = range(
basis <- nmat
}
dimnames(basis) <- list(nx, 1L:n.col)
- a <- list(degree = 3, knots = if (is.null(knots)) numeric(0) else knots,
- Boundary.knots = Boundary.knots, intercept = intercept)
+ a <- list(degree = 3,
+ knots = if (is.null(knots)) numeric(0) else knots,
+ Boundary.knots = Boundary.knots,
+ intercept = intercept,
+ Aknots = Aknots)
attributes(basis) <- c(attributes(basis), a)
class(basis) <- c("ns", "basis", "matrix")
diff --git a/R/summary.vglm.q b/R/summary.vglm.q
index 678b4fc..547e1a2 100644
--- a/R/summary.vglm.q
+++ b/R/summary.vglm.q
@@ -8,8 +8,13 @@
-yformat = function(x, digits = options()$digits) {
- format(ifelse(abs(x) < 0.001, signif(x, digits), round(x, digits)))
+
+
+
+
+
+yformat <- function(x, digits = options()$digits) {
+ format(ifelse(abs(x) < 0.001, signif(x, digits), round(x, digits)))
}
@@ -20,7 +25,10 @@ summaryvglm <- function(object, correlation = FALSE,
- if (length(dispersion) && dispersion == 0 &&
+
+
+ if (length(dispersion) &&
+ dispersion == 0 &&
length(object at family@summary.dispersion) &&
!object at family@summary.dispersion) {
stop("cannot use the general VGLM formula (based on a residual ",
@@ -44,12 +52,12 @@ summaryvglm <- function(object, correlation = FALSE,
presid = resid(object, type = "pearson")
if (length(presid))
- answer at pearson.resid = as.matrix(presid)
+ answer at pearson.resid <- as.matrix(presid)
- slot(answer, "misc") = stuff at misc # Replace
+ slot(answer, "misc") <- stuff at misc # Replace
if (is.numeric(stuff at dispersion))
- slot(answer, "dispersion") = stuff at dispersion
+ slot(answer, "dispersion") <- stuff at dispersion
answer
}
@@ -61,7 +69,7 @@ summaryvglm <- function(object, correlation = FALSE,
setMethod("logLik", "summary.vglm", function(object, ...)
- logLik.vlm(object, ...))
+ logLik.vlm(object, ...))
show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
@@ -108,7 +116,7 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
} else
if (M <= 5) {
cat("\nNames of linear predictors:",
- paste(x at misc$predictors.names, collapse = ", "), fill = TRUE)
+ paste(x at misc$predictors.names, collapse = ", "), fill = TRUE)
}
}
@@ -127,8 +135,9 @@ show.summary.vglm <- function(x, digits = NULL, quote = TRUE,
prose <- "(Pre-specified) "
}
cat(paste("\n", prose, "Dispersion Parameter for ",
- x at family@vfamily[1],
- " family: ", yformat(x at dispersion, digits), "\n", sep = ""))
+ x at family@vfamily[1],
+ " family: ", yformat(x at dispersion, digits), "\n",
+ sep = ""))
}
@@ -204,60 +213,115 @@ vcovdefault <- function(object, ...) {
- vcovvlm <- function(object, dispersion = NULL, untransform = FALSE) {
- so <- summaryvlm(object, correlation = FALSE, dispersion = dispersion)
- d = if (any(slotNames(so) == "dispersion") &&
+
+ vcovvlm <-
+function(object, dispersion = NULL, untransform = FALSE) {
+
+
+
+ so <- summaryvlm(object, correlation = FALSE,
+ dispersion = dispersion)
+ d <- if (any(slotNames(so) == "dispersion") &&
is.Numeric(so at dispersion)) so at dispersion else 1
- answer = d * so at cov.unscaled
-
- if (is.logical(OKRC <- object at misc$RegCondOK) && !OKRC)
- warning("MLE regularity conditions were violated ",
- "at the final iteration of the fitted object")
-
- if (!untransform)
- return(answer)
-
- if (!is.logical(object at misc$intercept.only))
- stop("cannot determine whether the object is",
- "an intercept-only fit, i.e., 'y ~ 1' is the response")
- if (!object at misc$intercept.only)
- stop("object must be an intercept-only fit, i.e., ",
- "y ~ 1 is the response")
-
- if (!all(trivial.constraints(constraints(object)) == 1))
- stop("object must have trivial constraints")
-
- M = object at misc$M
- Links = object at misc$link
- if (length(Links) != M && length(Links) != 1)
- stop("cannot obtain the link functions to untransform the object")
-
-
- tvector = numeric(M)
- etavector = predict(object)[1,] # Contains transformed parameters
- earg = object at misc$earg # This could be a NULL
- if (!is.null(earg) && M > 1 && (!is.list(earg) || length(earg) != M))
- stop("the 'earg' component of 'object at misc' should be of length ", M)
- for(ii in 1:M) {
- TTheta = etavector[ii] # Transformed theta
- use.earg = if (M == 1 || is.null(earg)) earg else earg[[ii]]
- if (is.list(use.earg) && !length(use.earg))
- use.earg = NULL
- newcall = paste(Links[ii],
- "(theta = TTheta, earg = use.earg, inverse = TRUE)",
- sep = "")
- newcall = parse(text=newcall)[[1]]
- Theta = eval(newcall) # Theta, the untransformed parameter
- newcall = paste(Links[ii],
- "(theta=Theta, earg = use.earg, deriv=1)", sep = "")
- newcall = parse(text=newcall)[[1]]
- tvector[ii] = eval(newcall)
+ answer <- d * so at cov.unscaled
+
+ if (is.logical(OKRC <- object at misc$RegCondOK) && !OKRC)
+ warning("MLE regularity conditions were violated ",
+ "at the final iteration of the fitted object")
+
+ if (!untransform)
+ return(answer)
+
+
+
+
+
+ new.way <- TRUE
+
+
+
+ if (!is.logical(object at misc$intercept.only))
+ stop("cannot determine whether the object is",
+ "an intercept-only fit, i.e., 'y ~ 1' is the response")
+ if (!object at misc$intercept.only)
+ stop("object must be an intercept-only fit, i.e., ",
+ "y ~ 1 is the response")
+
+ if (!all(trivial.constraints(constraints(object)) == 1))
+ stop("object must have trivial constraints")
+
+ M <- object at misc$M
+
+
+
+
+ tvector <- numeric(M)
+ etavector <- predict(object)[1, ] # Contains transformed parameters
+ LINK <- object at misc$link # link.names # This should be a character vector.
+ EARG <- object at misc$earg # This could be a NULL
+ if (is.null(EARG))
+ EARG <- list(theta = NULL)
+ if (!is.list(EARG))
+ stop("the 'earg' component of 'object at misc' must be a list")
+
+
+
+
+ if (length(LINK) != M &&
+ length(LINK) != 1)
+ stop("cannot obtain the link functions to untransform 'object'")
+
+
+
+ if (!is.character(LINK))
+ stop("the 'link' component of 'object at misc' should ",
+ "be a character vector")
+
+ learg <- length(EARG)
+ llink <- length(LINK)
+ if (llink != learg)
+ stop("the 'earg' component of 'object at misc' should ",
+ "be a list of length ", learg)
+
+
+ level1 <- length(EARG) > 3 &&
+ length(intersect(names(EARG),
+ c("theta", "inverse", "deriv", "short", "tag"))) > 3
+ if (level1)
+ EARG <- list(oneOnly = EARG)
+
+
+
+ learg <- length(EARG)
+ for (ii in 1:M) {
+ TTheta <- etavector[ii] # Transformed theta
+
+ use.earg <-
+ if (llink == 1) EARG[[1]] else EARG[[ii]]
+ function.name <-
+ if (llink == 1) LINK else LINK[ii]
+
+
+ if (new.way) {
+ use.earg[["inverse"]] <- TRUE # New
+ use.earg[["theta"]] <- TTheta # New
+ Theta <- do.call(function.name, use.earg)
+
+ use.earg[["inverse"]] <- FALSE # Reset this
+ use.earg[["deriv"]] <- 1 # New
+ use.earg[["theta"]] <- Theta # Renew this
+ tvector[ii] <- do.call(function.name, use.earg)
+ } else {
+ stop("link functions handled in the new way now")
+
}
- tvector = abs(tvector)
- answer = (cbind(tvector) %*% rbind(tvector)) * answer
- if (length(dmn2 <- names(object at misc$link)) == M)
- dimnames(answer) = list(dmn2, dmn2)
- answer
+ } # of for(ii in 1:M)
+
+ tvector <- abs(tvector)
+ answer <- (cbind(tvector) %*% rbind(tvector)) * answer
+ if (length(dmn2 <- names(object at misc$link)) == M)
+ dimnames(answer) <- list(dmn2, dmn2)
+ answer
}
diff --git a/R/vsmooth.spline.q b/R/vsmooth.spline.q
index c6fc0b2..d5bbd14 100644
--- a/R/vsmooth.spline.q
+++ b/R/vsmooth.spline.q
@@ -81,6 +81,18 @@ setMethod("predict", "vsmooth.spline.fit",
predictvsmooth.spline.fit(object, ...))
+
+setMethod("model.matrix", "vsmooth.spline",
+ function(object, ...)
+ model.matrixvlm(object, ...))
+
+
+
+
+
+
+
+
vsmooth.spline <- function(x, y, w = NULL, df = rep(5, M),
spar = NULL, #rep(0,M),
all.knots = FALSE,
@@ -451,23 +463,23 @@ show.vsmooth.spline <- function(x, ...) {
}
-coefvsmooth.spline.fit = function(object, ...) {
+coefvsmooth.spline.fit <- function(object, ...) {
object at Bcoefficients
}
-coefvsmooth.spline = function(object, matrix = FALSE, ...) {
+coefvsmooth.spline <- function(object, matrix = FALSE, ...) {
list(lfit = coefvlm(object at lfit, matrix.out = matrix),
nlfit=coefvsmooth.spline.fit(object at nlfit))
}
-fittedvsmooth.spline = function(object, ...) {
+fittedvsmooth.spline <- function(object, ...) {
object at y
}
-residvsmooth.spline = function(object, ...) {
+residvsmooth.spline <- function(object, ...) {
as.matrix(object at yin - object at y)
}
@@ -594,7 +606,8 @@ predictvsmooth.spline.fit <- function(object, x, deriv = 0) {
}
-valid.vknotl2 = function(knot, tol = 1/1024) {
+
+valid.vknotl2 <- function(knot, tol = 1/1024) {
junk = dotC(name="Yee_pknootl2", knot=as.double(knot),
as.integer(length(knot)),
diff --git a/data/alclevels.rda b/data/alclevels.rda
index ad50757..81f9610 100644
Binary files a/data/alclevels.rda and b/data/alclevels.rda differ
diff --git a/data/alcoff.rda b/data/alcoff.rda
index 5e6180f..d9dba17 100644
Binary files a/data/alcoff.rda and b/data/alcoff.rda differ
diff --git a/data/auuc.rda b/data/auuc.rda
index b1de85d..f305bdd 100644
Binary files a/data/auuc.rda and b/data/auuc.rda differ
diff --git a/data/backPain.rda b/data/backPain.rda
index bdd516a..365a38a 100644
Binary files a/data/backPain.rda and b/data/backPain.rda differ
diff --git a/data/car.all.rda b/data/car.all.rda
index 4fc62ee..c106411 100644
Binary files a/data/car.all.rda and b/data/car.all.rda differ
diff --git a/data/crashbc.rda b/data/crashbc.rda
index c301cda..ea9c46a 100644
Binary files a/data/crashbc.rda and b/data/crashbc.rda differ
diff --git a/data/crashf.rda b/data/crashf.rda
index 8ee5172..fbd0fc1 100644
Binary files a/data/crashf.rda and b/data/crashf.rda differ
diff --git a/data/crashi.rda b/data/crashi.rda
index 3f2e18d..bb96e9f 100644
Binary files a/data/crashi.rda and b/data/crashi.rda differ
diff --git a/data/crashmc.rda b/data/crashmc.rda
index a98ddc3..2932344 100644
Binary files a/data/crashmc.rda and b/data/crashmc.rda differ
diff --git a/data/crashp.rda b/data/crashp.rda
index 7083223..6d995b8 100644
Binary files a/data/crashp.rda and b/data/crashp.rda differ
diff --git a/data/crashtr.rda b/data/crashtr.rda
index 56b7f46..6feedb5 100644
Binary files a/data/crashtr.rda and b/data/crashtr.rda differ
diff --git a/data/crime.us.rda b/data/crime.us.rda
index 6ed26f6..d223c25 100644
Binary files a/data/crime.us.rda and b/data/crime.us.rda differ
diff --git a/data/datalist b/data/datalist
index 2b5d14a..db7b6bd 100644
--- a/data/datalist
+++ b/data/datalist
@@ -21,6 +21,7 @@ finney44
gala
gew
grain.us
+hormone
hspider
hued
huie
diff --git a/data/fibre15.rda b/data/fibre15.rda
index 9241a9c..f8eb7a6 100644
Binary files a/data/fibre15.rda and b/data/fibre15.rda differ
diff --git a/data/fibre1dot5.rda b/data/fibre1dot5.rda
index 210170c..5f52020 100644
Binary files a/data/fibre1dot5.rda and b/data/fibre1dot5.rda differ
diff --git a/data/finney44.rda b/data/finney44.rda
index 6f43bba..2601c01 100644
Binary files a/data/finney44.rda and b/data/finney44.rda differ
diff --git a/data/gala.rda b/data/gala.rda
index 7b15c26..f585262 100644
Binary files a/data/gala.rda and b/data/gala.rda differ
diff --git a/data/hormone.txt.bz2 b/data/hormone.txt.bz2
new file mode 100644
index 0000000..68a1d36
Binary files /dev/null and b/data/hormone.txt.bz2 differ
diff --git a/data/hspider.rda b/data/hspider.rda
index 2039f04..c9caaa4 100644
Binary files a/data/hspider.rda and b/data/hspider.rda differ
diff --git a/data/hued.rda b/data/hued.rda
index f102e02..881bdde 100644
Binary files a/data/hued.rda and b/data/hued.rda differ
diff --git a/data/huie.rda b/data/huie.rda
index 0160b6b..9a1d5f9 100644
Binary files a/data/huie.rda and b/data/huie.rda differ
diff --git a/data/huse.rda b/data/huse.rda
index 23aa88b..e58dec1 100644
Binary files a/data/huse.rda and b/data/huse.rda differ
diff --git a/data/leukemia.rda b/data/leukemia.rda
index b800e62..3bd8d95 100644
Binary files a/data/leukemia.rda and b/data/leukemia.rda differ
diff --git a/data/marital.nz.rda b/data/marital.nz.rda
index e225bf6..5c68005 100644
Binary files a/data/marital.nz.rda and b/data/marital.nz.rda differ
diff --git a/data/mmt.rda b/data/mmt.rda
index aba86e8..dd515a8 100644
Binary files a/data/mmt.rda and b/data/mmt.rda differ
diff --git a/data/pneumo.rda b/data/pneumo.rda
index ddf3da9..cf8c0ee 100644
Binary files a/data/pneumo.rda and b/data/pneumo.rda differ
diff --git a/data/rainfall.rda b/data/rainfall.rda
index 7eafe0e..e3c612a 100644
Binary files a/data/rainfall.rda and b/data/rainfall.rda differ
diff --git a/data/ruge.rda b/data/ruge.rda
index 0a90538..161ae03 100644
Binary files a/data/ruge.rda and b/data/ruge.rda differ
diff --git a/data/toxop.rda b/data/toxop.rda
index ac0958c..cf85b63 100644
Binary files a/data/toxop.rda and b/data/toxop.rda differ
diff --git a/data/ugss.rda b/data/ugss.rda
index fb5fc61..fcc35d3 100644
Binary files a/data/ugss.rda and b/data/ugss.rda differ
diff --git a/data/venice.rda b/data/venice.rda
index 16e8ec4..7f1a32c 100644
Binary files a/data/venice.rda and b/data/venice.rda differ
diff --git a/data/venice90.rda b/data/venice90.rda
index 4e26679..397db38 100644
Binary files a/data/venice90.rda and b/data/venice90.rda differ
diff --git a/data/wffc.indiv.rda b/data/wffc.indiv.rda
index 3194917..a3d3742 100644
Binary files a/data/wffc.indiv.rda and b/data/wffc.indiv.rda differ
diff --git a/data/wffc.nc.rda b/data/wffc.nc.rda
index efe9ed9..21cc525 100644
Binary files a/data/wffc.nc.rda and b/data/wffc.nc.rda differ
diff --git a/data/wffc.rda b/data/wffc.rda
index 61b66d8..6bdbf16 100644
Binary files a/data/wffc.rda and b/data/wffc.rda differ
diff --git a/data/wffc.teams.rda b/data/wffc.teams.rda
index 0e46967..d295353 100644
Binary files a/data/wffc.teams.rda and b/data/wffc.teams.rda differ
diff --git a/data/xs.nz.rda b/data/xs.nz.rda
index d6d1bf6..0b20db8 100644
Binary files a/data/xs.nz.rda and b/data/xs.nz.rda differ
diff --git a/inst/doc/categoricalVGAM.Rnw b/inst/doc/categoricalVGAM.Rnw
index 8009523..b5841f5 100644
--- a/inst/doc/categoricalVGAM.Rnw
+++ b/inst/doc/categoricalVGAM.Rnw
@@ -611,9 +611,9 @@ to fit a nonparametric proportional odds model
\citep[cf.~p.179 of][]{mccu:neld:1989}
to the pneumoconiosis data one could try
<<eval=T>>=
-pneumo <- transform(pneumo, let=log(exposure.time))
-fit <- vgam(cbind(normal, mild, severe) ~ s(let, df=2),
- cumulative(reverse=TRUE, parallel=TRUE), pneumo)
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+ cumulative(reverse = TRUE, parallel = TRUE), pneumo)
@
Here, setting \texttt{df = 1} means a linear fit so that
\texttt{df = 2} affords a little nonlinearity.
@@ -1437,7 +1437,7 @@ fit.ppom <- vglm(ordnum ~
mornaft +
fday +
finame,
- cumulative(parallel = FALSE ~ 1 + mornaft, reverse=TRUE),
+ cumulative(parallel = FALSE ~ 1 + mornaft, reverse = TRUE),
data = fnc)
head(coef(fit.ppom, matrix = TRUE), 8)
@
@@ -1460,7 +1460,7 @@ fit2.ppom <- vglm(ordnum ~
fday +
finame,
family = cumulative(parallel = FALSE ~ 1 + fday, reverse = TRUE),
- data=fnc)
+ data = fnc)
head(coef(fit2.ppom, matrix = TRUE), 8)
@
@@ -1527,7 +1527,7 @@ summary(marital.nz)
@
We fit the VGAM
<<>>=
-fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel=2),
+fit.ms <- vgam(mstatus ~ s(age, df = 3), multinomial(refLevel = 2),
data = marital.nz)
@
@@ -1678,9 +1678,9 @@ clist <- list("(Intercept)" = diag(3),
"age" = rbind(0, 0, 1))
fit2.ms <-
vglm(mstatus ~ poly(age, 2) + foo(age) + age,
- family = multinomial(refLevel=2),
- constraints=clist,
- data=marital.nz)
+ family = multinomial(refLevel = 2),
+ constraints = clist,
+ data = marital.nz)
@
Then
<<>>=
@@ -1690,12 +1690,12 @@ confirms that one term was used for each component function.
The plots from
<<fig=F>>=
par(mfrow=c(2,2))
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[1], scol=mycol[1], which.term=1)
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[2], scol=mycol[2], which.term=2)
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[3], scol=mycol[3], which.term=3)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[2], scol=mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[3], scol = mycol[3], which.term = 3)
@
are given in Figure~\ref{fig:jsscat.eg.mstatus.vglm}
and appear like
@@ -1710,12 +1710,12 @@ Figure~\ref{fig:jsscat.eg.mstatus}.
# Plot output
par(mfrow=c(2,2))
par(mar=c(4.5,4.0,1.2,2.2)+0.1)
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[1], scol=mycol[1], which.term=1)
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[2], scol=mycol[2], which.term=2)
-plotvgam(fit2.ms, se=TRUE, scale=12,
- lcol=mycol[3], scol=mycol[3], which.term=3)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[1], scol = mycol[1], which.term = 1)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[2], scol = mycol[2], which.term = 2)
+plotvgam(fit2.ms, se = TRUE, scale = 12,
+ lcol = mycol[3], scol = mycol[3], which.term = 3)
@
\caption{
Parametric version of~\texttt{fit.ms}: \texttt{fit2.ms}.
@@ -1877,8 +1877,8 @@ set.seed(123)
@
A rank-2 model fitted \textit{with a different normalization}
<<>>=
-bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank=2,
- Corner=FALSE, Uncor=TRUE)
+bp.rrmlm2 <- rrvglm(pain ~ sx1 + sx2 + sx3, multinomial, backPain, Rank = 2,
+ Corner = FALSE, Uncor = TRUE)
@
produces uncorrelated $\widehat{\bnu}_i = \widehat{\bC}^{\top} \bix_{2i}$.
In fact \textsl{\texttt{var(lv(bp.rrmlm2))}} equals $\bI_2$
diff --git a/inst/doc/categoricalVGAM.pdf b/inst/doc/categoricalVGAM.pdf
index 40abdf7..79095d7 100644
Binary files a/inst/doc/categoricalVGAM.pdf and b/inst/doc/categoricalVGAM.pdf differ
diff --git a/man/AA.Aa.aa.Rd b/man/AA.Aa.aa.Rd
index 2cd9152..0786a8b 100644
--- a/man/AA.Aa.aa.Rd
+++ b/man/AA.Aa.aa.Rd
@@ -7,7 +7,7 @@
AA-Aa-aa blood group system.
}
\usage{
-AA.Aa.aa(link = "logit", earg=list(), init.pA = NULL)
+AA.Aa.aa(link = "logit", init.pA = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,11 +16,6 @@ AA.Aa.aa(link = "logit", earg=list(), init.pA = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.pA}{ Optional initial value for \code{pA}. }
}
\details{
diff --git a/man/AB.Ab.aB.ab.Rd b/man/AB.Ab.aB.ab.Rd
index 68db8ce..a752ce7 100644
--- a/man/AB.Ab.aB.ab.Rd
+++ b/man/AB.Ab.aB.ab.Rd
@@ -8,7 +8,7 @@
}
\usage{
-AB.Ab.aB.ab(link = "logit", earg=list(), init.p = NULL)
+AB.Ab.aB.ab(link = "logit", init.p = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ AB.Ab.aB.ab(link = "logit", earg=list(), init.p = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.p}{ Optional initial value for \code{p}. }
}
\details{
diff --git a/man/AB.Ab.aB.ab2.Rd b/man/AB.Ab.aB.ab2.Rd
index 8014a67..a62806d 100644
--- a/man/AB.Ab.aB.ab2.Rd
+++ b/man/AB.Ab.aB.ab2.Rd
@@ -8,7 +8,7 @@
}
\usage{
-AB.Ab.aB.ab2(link = "logit", earg=list(), init.p = NULL)
+AB.Ab.aB.ab2(link = "logit", init.p = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ AB.Ab.aB.ab2(link = "logit", earg=list(), init.p = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.p}{ Optional initial value for \code{p}. }
}
\details{
diff --git a/man/ABO.Rd b/man/ABO.Rd
index 07417a1..09d617e 100644
--- a/man/ABO.Rd
+++ b/man/ABO.Rd
@@ -8,7 +8,7 @@
}
\usage{
-ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
+ABO(link = "logit", ipA = NULL, ipO = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ ABO(link = "logit", earg=list(), ipA = NULL, ipO = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument applied to each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ipA, ipO}{
Optional initial value for \code{pA} and \code{pO}.
A \code{NULL} value means values are computed internally.
diff --git a/man/CommonVGAMffArguments.Rd b/man/CommonVGAMffArguments.Rd
index cee7815..1ff22e8 100644
--- a/man/CommonVGAMffArguments.Rd
+++ b/man/CommonVGAMffArguments.Rd
@@ -13,11 +13,14 @@
}
\usage{
-TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
- parallel = TRUE, shrinkage.init = 0.95,
+TypicalVGAMfamilyFunction(lsigma = "loge",
+ isigma = NULL, parallel = TRUE,
+ shrinkage.init = 0.95,
nointercept = NULL, imethod = 1,
- prob.x = c(0.15, 0.85), mv = FALSE,
- whitespace = FALSE,
+ probs.x = c(0.15, 0.85),
+ probs.y = c(0.25, 0.50, 0.75),
+ mv = FALSE, earg.link = FALSE,
+ whitespace = FALSE, bred = FALSE,
oim = FALSE, nsimEIM = 100, zero = NULL)
}
\arguments{
@@ -30,15 +33,17 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
\code{link}.
}
- \item{esigma}{
- List.
- Extra argument allowing for additional information, specific to the
- link function.
- See \code{\link{Links}} for more information.
- If there is only one parameter then this argument is often called
- \code{earg}.
+% \item{esigma}{
+% List.
+% Extra argument allowing for additional information, specific to the
+% link function.
+% See \code{\link{Links}} for more information.
+% If there is only one parameter then this argument is often called
+% \code{earg}.
+
+% }
+
- }
\item{isigma}{
Optional initial values can often be inputted using an argument
beginning with \code{"i"}.
@@ -51,8 +56,16 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
}
\item{parallel}{
- A logical, or formula specifying which terms have equal/unequal
+ A logical, or a simple formula specifying which terms have equal/unequal
coefficients.
+ The formula must be simple, i.e., additive with simple main effects terms.
+ Interactions and nesting etc. are not handled.
+ To handle complex formulas use the \code{constraints} argument
+ (of \code{\link{vglm}} etc.);
+ however, there is a lot more setting up involved and things will
+ not be as convenient.
+
+
This argument is common in \pkg{VGAM} family functions for categorical
responses, e.g., \code{\link{cumulative}}, \code{\link{acat}},
\code{\link{cratio}}, \code{\link{sratio}}.
@@ -104,12 +117,12 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
}
- \item{prob.x}{
- Numeric, of length two.
+ \item{probs.x, probs.y}{
+ Numeric, with values in (0, 1).
The probabilites that define quantiles with respect to some vector,
- usually an \code{x} of some sort.
+ usually an \code{x} or \code{y} of some sort.
This is used to create two subsets of data corresponding to `low' and
- `high' values of x.
+ `high' values of x or y.
Each value is separately fed into the \code{probs} argument
of \code{\link[stats:quantile]{quantile}}.
If the data set size is small then it may be necessary to
@@ -218,6 +231,22 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
response and contains the number of trials.
}
+ \item{earg.link}{
+ Sometimes the link argument can receive \code{earg}-type input,
+ such as \code{\link{quasibinomial}} calling \code{\link{binomial}}.
+ This argument should be generally ignored.
+
+
+ }
+ \item{bred}{
+ Logical.
+ Some \pkg{VGAM} family functions will allow bias-reduction based
+ on the work by Kosmidis and Firth.
+ Currently none are working yet!
+
+
+
+ }
}
\value{
@@ -225,6 +254,7 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\section{Warning }{
The \code{zero} argument is supplied for convenience but conflicts
@@ -261,21 +291,41 @@ TypicalVGAMfamilyFunction(lsigma = "loge", esigma = list(), isigma = NULL,
Full details will be given in documentation yet to be written,
at a later date!
+
}
-%\references{
-%}
+\references{
+
+Kosmidis, I. and Firth, D. (2009)
+Bias reduction in exponential family nonlinear models.
+\emph{Biometrika},
+\bold{96}(4), 793--804.
+
+
+%Kosmidis, I. and Firth, D. (2010)
+%A generic algorithm for reducing bias in parametric estimation.
+%\emph{Electronic Journal of Statistics},
+%\bold{4}, 1097--1112.
+
+
+}
\seealso{
\code{\link{Links}},
\code{\link{vglmff-class}}.
+
}
\author{T. W. Yee}
-%\note{
-%
-%}
+\note{
+ See \code{\link{Links}} regarding a major change in
+ link functions, for version 0.9-0 and higher
+ (released during the 2nd half of 2012).
+
+
+
+}
\examples{
# Example 1
@@ -283,21 +333,21 @@ cumulative()
cumulative(link = "probit", reverse = TRUE, parallel = TRUE)
# Example 2
-wdata <- data.frame(x = runif(nn <- 1000))
+wdata <- data.frame(x2 = runif(nn <- 1000))
wdata <- transform(wdata,
- y = rweibull(nn, shape = 2 + exp(1+x), scale = exp(-0.5)))
-fit = vglm(y ~ x, weibull(lshape = "logoff", eshape = list(offset = -2),
- zero = 2), wdata)
+ y = rweibull(nn, shape = 2 + exp(1 + x2), scale = exp(-0.5)))
+fit <- vglm(y ~ x2, weibull(lshape = logoff(offset = -2), zero = 2), wdata)
coef(fit, mat = TRUE)
# Example 3; multivariate (multiple) response
+\dontrun{
ndata <- data.frame(x = runif(nn <- 500))
ndata <- transform(ndata,
y1 = rnbinom(nn, mu = exp(3+x), size = exp(1)), # k is size
y2 = rnbinom(nn, mu = exp(2-x), size = exp(0)))
fit <- vglm(cbind(y1, y2) ~ x, negbinomial(zero = -2), ndata)
coef(fit, matrix = TRUE)
-
+}
# Example 4
\dontrun{
# fit1 and fit2 are equivalent
@@ -308,27 +358,27 @@ fit2 <- vglm(ymatrix ~ x2 + x3 + x4 + x5,
}
# Example 5
-gdata <- data.frame(x = rnorm(nn <- 200))
+gdata <- data.frame(x2 = rnorm(nn <- 200))
gdata <- transform(gdata,
- y1 = rnorm(nn, mean = 1 - 3*x, sd = exp(1 + 0.2*x)),
- y2 = rnorm(nn, mean = 1 - 3*x, sd = exp(1)))
+ y1 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1 + 0.2*x2)),
+ y2 = rnorm(nn, mean = 1 - 3*x2, sd = exp(1)))
args(normal1)
-fit1 <- vglm(y1 ~ x, normal1, gdata) # This is ok
-fit2 <- vglm(y2 ~ x, normal1(zero = 2), gdata) # This is ok
+fit1 <- vglm(y1 ~ x2, normal1, gdata) # This is ok
+fit2 <- vglm(y2 ~ x2, normal1(zero = 2), gdata) # This is ok
# This creates potential conflict
-clist <- list("(Intercept)" = diag(2), "x" = diag(2))
-fit3 <- vglm(y2 ~ x, normal1(zero = 2), gdata,
+clist <- list("(Intercept)" = diag(2), "x2" = diag(2))
+fit3 <- vglm(y2 ~ x2, normal1(zero = 2), gdata,
constraints = clist) # Conflict!
-coef(fit3, matrix = TRUE) # Shows that clist[["x"]] was overwritten,
+coef(fit3, matrix = TRUE) # Shows that clist[["x2"]] was overwritten,
constraints(fit3) # i.e., 'zero' seems to override the 'constraints' arg
# Example 6 ('whitespace' argument)
-pneumo = transform(pneumo, let = log(exposure.time))
-fit1 = vglm(cbind(normal, mild, severe) ~ let,
- sratio(whitespace = FALSE, parallel = TRUE), pneumo)
-fit2 = vglm(cbind(normal, mild, severe) ~ let,
- sratio(whitespace = TRUE, parallel = TRUE), pneumo)
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit1 <- vglm(cbind(normal, mild, severe) ~ let,
+ sratio(whitespace = FALSE, parallel = TRUE), pneumo)
+fit2 <- vglm(cbind(normal, mild, severe) ~ let,
+ sratio(whitespace = TRUE, parallel = TRUE), pneumo)
head(predict(fit1), 2) # No white spaces
head(predict(fit2), 2) # Uses white spaces
}
diff --git a/man/G1G2G3.Rd b/man/G1G2G3.Rd
index e81a7ec..55456d1 100644
--- a/man/G1G2G3.Rd
+++ b/man/G1G2G3.Rd
@@ -8,7 +8,7 @@
}
\usage{
-G1G2G3(link = "logit", earg=list(), ip1 = NULL, ip2 = NULL, iF = NULL)
+G1G2G3(link = "logit", ip1 = NULL, ip2 = NULL, iF = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ G1G2G3(link = "logit", earg=list(), ip1 = NULL, ip2 = NULL, iF = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ip1, ip2, iF}{
Optional initial value for \code{p1}, \code{p2} and \code{f}.
@@ -69,11 +64,11 @@ argument is used to specify the total number of counts for each row.
\code{\link{MNSs}}.
}
\examples{
-y = cbind(108, 196, 429, 143, 513, 559)
-fit = vglm(y ~ 1, G1G2G3(link=probit), trace=TRUE, crit="coef")
-fit = vglm(y ~ 1, G1G2G3(link=logit, ip1=.3, ip2=.3, iF=.02),
- trace=TRUE, crit="coef")
-fit = vglm(y ~ 1, G1G2G3(link="identity"), trace=TRUE)
+y <- cbind(108, 196, 429, 143, 513, 559)
+fit <- vglm(y ~ 1, G1G2G3(link = probit), trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, G1G2G3(link = logit, ip1 = 0.3, ip2 = 0.3, iF = 0.02),
+ trace = TRUE, crit = "coef")
+fit <- vglm(y ~ 1, G1G2G3(link = "identity"), trace = TRUE)
Coef(fit) # Estimated p1, p2 and f
rbind(y, sum(y)*fitted(fit))
sqrt(diag(vcov(fit)))
diff --git a/man/Inv.gaussian.Rd b/man/Inv.gaussian.Rd
index 0ba90fc..10f630f 100644
--- a/man/Inv.gaussian.Rd
+++ b/man/Inv.gaussian.Rd
@@ -26,7 +26,7 @@ rinv.gaussian(n, mu, lambda)
\item{lambda}{the \eqn{\lambda}{lambda} parameter.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
diff --git a/man/Links.Rd b/man/Links.Rd
index 4ae9c69..af8ca80 100644
--- a/man/Links.Rd
+++ b/man/Links.Rd
@@ -9,8 +9,10 @@
}
\usage{
-TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
- deriv = 0, short = TRUE, tag = FALSE)
+TypicalVGAMlinkFunction(theta, someParameter = 0,
+ bvalue = NULL,
+ inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
}
\arguments{
\item{theta}{
@@ -19,45 +21,76 @@ TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
depending on the other arguments.
If \code{theta} is character then \code{inverse} and
\code{deriv} are ignored.
+ The name \code{theta} should always be the name of the first argument.
}
- \item{earg}{
- List.
- Extra argument allowing for additional information, specific to the
- link function. For example, for \code{\link{logoff}}, this will
- contain the offset value. The argument \code{earg} is
- always a list with \emph{named} components. See each specific link
- function to find the component names for the list.
-
-
- Almost all \pkg{VGAM} family functions with a single link
- function have an argument (often called \code{earg}) which will
- allow parameters to be inputted for that link function.
- For \pkg{VGAM} family functions with more than one link
- function there usually will be an \code{earg}-type argument for
- each link. For example, if there are two links called
- \code{lshape} and \code{lscale} then
- the \code{earg}-type arguments for these might be called
- \code{eshape} and \code{escale}, say.
+
+ \item{someParameter}{
+ Some parameter, e.g., an offset.
+
+
+ }
+ \item{bvalue}{
+ Boundary value, positive if given.
+ If \code{0 < theta} then
+ values of \code{theta} which are less than or equal to 0 can be
+ replaced by \code{bvalue}
+ before computing the link function value.
+ Values of \code{theta} which are greater than or equal to 1 can be
+ replaced by 1 minus \code{bvalue}
+ before computing the link function value.
+ The value \code{bvalue = .Machine$double.eps} is sometimes a reasonable
+ value, or something slightly higher.
+
}
+
+
+% \item{earg}{
+% List.
+% Extra argument allowing for additional information, specific to the
+% link function. For example, for \code{\link{logoff}}, this will
+% contain the offset value. The argument \code{earg} is
+% always a list with \emph{named} components. See each specific link
+% function to find the component names for the list.
+%
+%
+% Almost all \pkg{VGAM} family functions with a single link
+% function have an argument (often called \code{earg}) which will
+% allow parameters to be inputted for that link function.
+% For \pkg{VGAM} family functions with more than one link
+% function there usually will be an \code{earg}-type argument for
+% each link. For example, if there are two links called
+% \code{lshape} and \code{lscale} then
+% the \code{earg}-type arguments for these might be called
+% \code{eshape} and \code{escale}, say.
+%
+% }
+
+
+
+
\item{inverse}{
Logical. If \code{TRUE} the inverse link value
\eqn{\theta}{theta} is returned, hence the argument
\code{theta} is really \eqn{\eta}{eta}.
+
}
\item{deriv}{
Integer. Either 0, 1, or 2 specifying the order of the derivative.
+
}
\item{short, tag}{
Logical.
- Used for labelling the \code{blurb} slot of a
+ These are used for labelling the \code{blurb} slot of a
\code{\link{vglmff-class}} object.
- Used only if \code{theta} is character, and gives the formula
- for the link in character form.
- If \code{tag = TRUE} then the result contains a little more information.
+ These arguments are used only if \code{theta} is character,
+ and gives the formula for the link in character form.
+  If \code{tag = TRUE} then the result is preceded by a little
+ more information.
+
}
}
@@ -85,7 +118,7 @@ TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
\eqn{\eta}{eta}.
If \code{inverse = TRUE} and \code{deriv} is positive then the
\emph{reciprocal} of the same link function with
- \code{(theta = theta, earg = earg, inverse = TRUE, deriv = deriv)}
+ \code{(theta = theta, someParameter, inverse = TRUE, deriv = deriv)}
is returned.
@@ -93,14 +126,19 @@ TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
\details{
Almost all \pkg{VGAM} link functions have something similar to
the argument list as given above.
- That is, there is a matching \code{earg} for each \code{link} argument.
- In this help file
- we have \eqn{\eta = g(\theta)}{eta = g(theta)}
- where \eqn{g} is the link function, \eqn{\theta}{theta} is the parameter
- and \eqn{\eta}{eta} is the linear/additive predictor.
+ In this help file we have \eqn{\eta = g(\theta)}{eta = g(theta)}
+ where \eqn{g} is the link function, \eqn{\theta}{theta} is the
+ parameter and \eqn{\eta}{eta} is the linear/additive predictor.
+
+
+% The arguments \code{short} and \code{tag} are used only if
+% \code{theta} is character.
+
+% That is, there is a matching \code{earg} for each \code{link} argument.
+
The following is a brief enumeration of all \pkg{VGAM} link functions.
@@ -189,17 +227,48 @@ TypicalVGAMlinkFunction(theta, earg = list(), inverse = FALSE,
a character string, or just the name itself. See the examples below.
+ From August 2012 onwards, a major change in link functions
+ occurred.
+ Argument \code{esigma} (and the like such as \code{earg})
+ used to be in \pkg{VGAM} prior to version 0.9-0 (released
+ during the 2nd half of 2012).
+  The major change is that arguments such as \code{offset} that used to
+  be passed in via those arguments can be done directly through
+ the link function. For example, \code{gev(lshape = "logoff",
+ eshape = list(offset = 0.5))} is replaced by \code{gev(lshape
+ = logoff(offset = 0.5))}. The \code{@misc} slot no longer
+ has \code{link} and \code{earg} components, but two other
+ components replace these. Functions such as \code{dtheta.deta()},
+ \code{d2theta.deta2()}, \code{eta2theta()}, \code{theta2eta()}
+ are modified.
+
+
+
+
+
+
+
+
+
+
+
+
}
\examples{
logit("a")
logit("a", short = FALSE)
logit("a", short = FALSE, tag = TRUE)
-logoff(1:5, earg = list(offset = 1)) # Same as log(1:5 + 1)
-powl(1:5, earg = list(power = 2)) # Same as (1:5)^2
+logoff(1:5, offset = 1) # Same as log(1:5 + 1)
+powl(1:5, power = 2) # Same as (1:5)^2
+
+\dontrun{ # This is old and no longer works:
+logoff(1:5, earg = list(offset = 1))
+powl(1:5, earg = list(power = 2))
+}
-fit1 <- vgam(agaaus ~ altitude, binomialff(link = cloglog), hunua) # ok
-fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
+fit1 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
+fit2 <- vgam(agaaus ~ altitude, binomialff(link = "cloglog"), hunua) # ok
\dontrun{
# This no longer works since "clog" is not a valid VGAM link function:
@@ -209,17 +278,19 @@ fit3 <- vgam(agaaus ~ altitude, binomialff(link = "clog"), hunua) # not ok
# No matter what the link, the estimated var-cov matrix is the same
y <- rbeta(n = 1000, shape1 = exp(0), shape2 = exp(1))
fit1 <- vglm(y ~ 1, beta.ab(lshape1 = "identity", lshape2 = "identity"),
- trace = TRUE, crit = "c")
-fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff, eshape1 = list(offset = 1.1),
- lshape2 = logoff, eshape2 = list(offset = 1.1)),
- trace = TRUE, crit = "c")
-vcov(fit1, untran = TRUE)
-vcov(fit1, untran = TRUE) - vcov(fit2, untran = TRUE) # Should be all 0s
-fit1 at misc$earg # No 'special' parameters
-fit2 at misc$earg # Some 'special' parameters are here
+ trace = TRUE, crit = "coef")
+fit2 <- vglm(y ~ 1, beta.ab(lshape1 = logoff(offset = 1.1),
+ lshape2 = logoff(offset = 1.1)),
+ trace = TRUE, crit = "coef")
+vcov(fit1, untransform = TRUE)
+vcov(fit1, untransform = TRUE) - vcov(fit2, untransform = TRUE) # Should be all 0s
+\dontrun{ # This is old:
+fit1 at misc$earg # Some 'special' parameters
+fit2 at misc$earg # Some 'special' parameters are here
+}
-par(mfrow = c(2,2))
+par(mfrow = c(2, 2))
p <- seq(0.01, 0.99, len = 200)
x <- seq(-4, 4, len = 200)
plot(p, logit(p), type = "l", col = "blue")
diff --git a/man/MNSs.Rd b/man/MNSs.Rd
index dc0760d..c082d16 100644
--- a/man/MNSs.Rd
+++ b/man/MNSs.Rd
@@ -7,7 +7,7 @@
the MNSs blood group system.
}
\usage{
-MNSs(link = "logit", earg=list(), imS = NULL, ims = NULL, inS = NULL)
+MNSs(link = "logit", imS = NULL, ims = NULL, inS = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,11 +16,6 @@ MNSs(link = "logit", earg=list(), imS = NULL, ims = NULL, inS = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument applied to each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{imS, ims, inS}{
Optional initial value for \code{mS}, \code{ms}
and \code{nS} respectively.
@@ -36,17 +31,20 @@ MNSs(link = "logit", earg=list(), imS = NULL, ims = NULL, inS = NULL)
\code{(g(m_S), g(m_s), g(n_S))} where \code{g} is the
link function.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Elandt-Johnson, R. C. (1971)
\emph{Probability Models and Statistical Methods in Genetics},
New York: Wiley.
+
}
\author{ T. W. Yee }
\note{
@@ -56,6 +54,7 @@ MNSs(link = "logit", earg=list(), imS = NULL, ims = NULL, inS = NULL)
proportions (so each row adds to 1) and the \code{weights}
argument is used to specify the total number of counts for each row.
+
}
\seealso{
\code{\link{AA.Aa.aa}},
@@ -63,15 +62,17 @@ MNSs(link = "logit", earg=list(), imS = NULL, ims = NULL, inS = NULL)
\code{\link{AB.Ab.aB.ab2}},
\code{\link{ABO}},
\code{\link{G1G2G3}}.
+
+
}
\examples{
# Order matters only:
-y = cbind(MS=295, Ms=107, MNS=379, MNs=322, NS=102, Ns=214)
-fit = vglm(y ~ 1, MNSs("logit", .25, .28, .08), trace=TRUE)
-fit = vglm(y ~ 1, MNSs(link=logit), trace=TRUE, cri="coef")
+y <- cbind(MS = 295, Ms = 107, MNS = 379, MNs = 322, NS = 102, Ns = 214)
+fit <- vglm(y ~ 1, MNSs("logit", .25, .28, .08), trace = TRUE)
+fit <- vglm(y ~ 1, MNSs(link = logit), trace = TRUE, crit = "coef")
Coef(fit)
rbind(y, sum(y)*fitted(fit))
-diag(vcov(fit))^0.5
+sqrt(diag(vcov(fit)))
}
\keyword{models}
\keyword{regression}
diff --git a/man/Max.Rd b/man/Max.Rd
index 70fda24..e39a927 100644
--- a/man/Max.Rd
+++ b/man/Max.Rd
@@ -4,6 +4,7 @@
\title{ Maxima }
\description{
Generic function for the \emph{maxima} (maximums) of a model.
+
}
\usage{
Max(object, ...)
@@ -13,16 +14,19 @@ Max(object, ...)
\item{object}{ An object for which the computation or
extraction of
a maximum (or maxima) is meaningful.
+
}
\item{\dots}{ Other arguments fed into the specific
methods function of the model. Sometimes they are fed
into the methods function for \code{\link{Coef}}.
+
}
}
\details{
Different models can define a maximum in different ways.
Many models have no such notion or definition.
+
Maxima occur in quadratic and additive ordination,
e.g., CQO or UQO or CAO.
For these models the maximum is the fitted value at the
@@ -32,10 +36,13 @@ Max(object, ...)
on the boundary, then the optimum is undefined. For
a valid optimum, the fitted value at the optimum is the maximum.
+
}
\value{
The value returned depends specifically on the methods
function invoked.
+
+
}
\references{
@@ -45,10 +52,12 @@ canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
@@ -61,24 +70,25 @@ Constrained additive ordination.
\code{Max.qrrvglm},
\code{\link{Tol}},
\code{\link{Opt}}.
+
+
}
\examples{
\dontrun{
-set.seed(111) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- Bestof = 2,
- fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ quasipoissonff, Bestof = 2, data = hspider, Crow1positive = FALSE)
Max(p1)
-index = 1:ncol(p1 at y)
-persp(p1, col=index, las=1, lwd=2)
-abline(h=Max(p1), lty=2, col=index)
+index <- 1:ncol(depvar(p1))
+persp(p1, col = index, las = 1, llwd = 2)
+abline(h = Max(p1), lty = 2, col = index)
}
}
diff --git a/man/Pareto.Rd b/man/Pareto.Rd
index 012ee8e..1aa2d44 100644
--- a/man/Pareto.Rd
+++ b/man/Pareto.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dpareto(x, location, shape, log=FALSE)
+dpareto(x, location, shape, log = FALSE)
ppareto(q, location, shape)
qpareto(p, location, shape)
rpareto(n, location, shape)
@@ -24,7 +24,7 @@ rpareto(n, location, shape)
\item{location, shape}{the \eqn{\alpha}{alpha} and \eqn{k} parameters.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -56,18 +56,18 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-alpha = 3; k = exp(1); x = seq(2.8, 8, len=300)
+alpha <- 3; k <- exp(1); x <- seq(2.8, 8, len = 300)
\dontrun{
-plot(x, dpareto(x, location=alpha, shape=k), type="l",
- main="Pareto density split into 10 equal areas")
-abline(h=0, col="blue", lty=2)
-qq = qpareto(seq(0.1,0.9,by=0.1),location=alpha,shape=k)
-lines(qq, dpareto(qq, loc=alpha, shape=k), col="purple", lty=3, type="h")
+plot(x, dpareto(x, location = alpha, shape = k), type = "l",
+ main = "Pareto density split into 10 equal areas")
+abline(h = 0, col = "blue", lty = 2)
+qq <- qpareto(seq(0.1,0.9,by = 0.1),location = alpha,shape = k)
+lines(qq, dpareto(qq, loc = alpha, shape = k), col = "purple", lty = 3, type = "h")
}
-pp = seq(0.1,0.9,by=0.1)
-qq = qpareto(pp, location=alpha, shape=k)
-ppareto(qq, location=alpha, shape=k)
-qpareto(ppareto(qq,loc=alpha,shape=k),loc=alpha,shape=k) - qq # Should be 0
+pp <- seq(0.1,0.9,by = 0.1)
+qq <- qpareto(pp, location = alpha, shape = k)
+ppareto(qq, location = alpha, shape = k)
+qpareto(ppareto(qq,loc = alpha,shape = k),loc = alpha,shape = k) - qq # Should be 0
}
\keyword{distribution}
diff --git a/man/Qvar.Rd b/man/Qvar.Rd
index 9082588..57639d1 100644
--- a/man/Qvar.Rd
+++ b/man/Qvar.Rd
@@ -8,7 +8,7 @@ Quasi-variances Preprocessing Function
}
\description{
Takes a \code{\link{vglm}} fit or a variance-covariance matrix,
- and preprocesses it for \code{\link{rcam}} and
+ and preprocesses it for \code{\link{rcim}} and
\code{\link{normal1}} so that quasi-variances can be computed.
@@ -84,8 +84,8 @@ Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
can compute \eqn{L} quasi-variances based on all pairwise difference
of the coefficients. They are based on an approximation, and can be
treated as uncorrelated. In minimizing the relative (not absolute)
- errors it is not hard to see that the estimation involves a RCAM
- (\code{\link{rcam}}) with an exponential link function
+ errors it is not hard to see that the estimation involves a RCIM
+ (\code{\link{rcim}}) with an exponential link function
(\code{\link{explink}}).
@@ -159,7 +159,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
It is important to set \code{maxit} to be larger than usual for
- \code{\link{rcam}} since convergence is slow. Upon successful
+ \code{\link{rcim}} since convergence is slow. Upon successful
convergence the \eqn{i}th row effect and the \eqn{i}th column effect
should be equal. A simple computation involving the fitted and
predicted values allows the quasi-variances to be extracted (see
@@ -183,7 +183,7 @@ Qvar(object, factorname = NULL, coef.indices = NULL, labels = NULL,
\seealso{
- \code{\link{rcam}},
+ \code{\link{rcim}},
\code{\link{vglm}},
\code{\link{normal1}},
\code{\link{explink}},
@@ -202,18 +202,18 @@ Shipmodel <- vglm(incidents ~ type + year + period,
data = ships, subset = (service > 0))
# Easiest form of input
-fit1 <- rcam(Qvar(Shipmodel, "type"), normal1("explink"), maxit = 99)
+fit1 <- rcim(Qvar(Shipmodel, "type"), normal1("explink"), maxit = 99)
(quasiVar <- exp(diag(fitted(fit1))) / 2) # Version 1
(quasiVar <- diag(predict(fit1)[, c(TRUE, FALSE)]) / 2) # Version 2
(quasiSE <- sqrt(quasiVar))
# Another form of input
-fit2 <- rcam(Qvar(Shipmodel, coef.ind = c(0,2:5), reference.name = "typeA"),
+fit2 <- rcim(Qvar(Shipmodel, coef.ind = c(0,2:5), reference.name = "typeA"),
normal1("explink"), maxit = 99)
-\dontrun{ plotqvar(fit2, col = "orange", lwd = 3, scol = "blue", slwd = 2, las = 1) }
+\dontrun{ plotqvar(fit2, col = "green", lwd = 3, scol = "blue", slwd = 2, las = 1) }
# The variance-covariance matrix is another form of input (not recommended)
-fit3 <- rcam(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
+fit3 <- rcim(Qvar(cbind(0, rbind(0, vcov(Shipmodel)[2:5, 2:5])),
labels = c("typeA", "typeB", "typeC", "typeD", "typeE"),
estimates = c(typeA = 0, coef(Shipmodel)[2:5])),
normal1("explink"), maxit = 99)
diff --git a/man/Rcam.Rd b/man/Rcam.Rd
index 8df357d..d31ffb6 100644
--- a/man/Rcam.Rd
+++ b/man/Rcam.Rd
@@ -1,5 +1,5 @@
-\name{Rcam}
-\alias{Rcam}
+\name{Rcim}
+\alias{Rcim}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Mark the Baseline of Row and Column on a Matrix data
@@ -8,12 +8,13 @@
\description{
Rearrange the rows and columns of the input so
that the first row and first column are baseline.
- This function is for rank-zero row-column association models
- (RCAMs; i.e., general main effects models).
+ This function is for rank-zero row-column interaction models
+ (RCIMs; i.e., general main effects models).
+
}
\usage{
- Rcam(mat, rbaseline = 1, cbaseline = 1)
+ Rcim(mat, rbaseline = 1, cbaseline = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -32,8 +33,8 @@
}
}
\details{
- This is a data preprocessing function for \code{\link{rcam}}.
- For rank-zero row-column association models this function
+ This is a data preprocessing function for \code{\link{rcim}}.
+ For rank-zero row-column interaction models this function
establishes the baseline (or reference) levels of the matrix
response with respect to the row and columns---these become
the new first row and column.
@@ -50,6 +51,7 @@
\author{
Alfian F. Hadi and T. W. Yee.
+
}
\note{
This function is similar to \code{\link{moffset}}; see
@@ -70,13 +72,14 @@ Alfian F. Hadi and T. W. Yee.
\seealso{
\code{\link{moffset}},
- \code{\link{rcam}},
- \code{\link{plotrcam0}}.
+ \code{\link{rcim}},
+ \code{\link{plotrcim0}}.
+
}
\examples{
(alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*"))
-(aa = Rcam(alcoff, rbaseline = "11", cbaseline = "Sunday"))
-(bb = moffset(alcoff, "11", "Sunday", postfix = "*"))
-aa - bb # Notice the difference!
+(aa <- Rcim(alcoff, rbaseline = "11", cbaseline = "Sun"))
+(bb <- moffset(alcoff, "11", "Sun", postfix = "*"))
+aa - bb # Notice the difference!
}
diff --git a/man/Tol.Rd b/man/Tol.Rd
index a2190dc..c48f901 100644
--- a/man/Tol.Rd
+++ b/man/Tol.Rd
@@ -12,16 +12,21 @@ Tol(object, ...)
\arguments{
\item{object}{ An object for which the computation or
extraction of a tolerance or tolerances is meaningful.
+
+
}
\item{\dots}{ Other arguments fed into the specific
methods function of the model. Sometimes they are fed
into the methods function for \code{\link{Coef}}.
+
+
}
}
\details{
Different models can define an optimum in different ways.
Many models have no such notion or definition.
+
Tolerances occur in quadratic ordination, i.e., CQO or UQO.
They have ecological meaning because a high tolerance
for a species means the species can survive over a large
@@ -31,23 +36,30 @@ Tol(object, ...)
Mathematically, the tolerance is like the variance of
a normal distribution.
+
}
\value{
The value returned depends specifically on the methods
function invoked.
+
+
}
\references{
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
+
}
\author{ Thomas W. Yee }
@@ -56,6 +68,8 @@ Constrained additive ordination.
Tolerances are undefined for `linear' and additive
ordination models.
They are well-defined for quadratic ordination models.
+
+
}
\section{Warning }{
There is a direct inverse relationship between the scaling of
@@ -70,25 +84,28 @@ Constrained additive ordination.
\code{EqualTolerances} is \code{TRUE} or \code{FALSE}.
See Yee (2004) for details.
+
+
}
\seealso{
\code{Tol.qrrvglm}.
\code{\link{Max}},
\code{\link{Opt}}.
+
+
}
\examples{
-set.seed(111) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-# vvv p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
-# vvv Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
-# vvv Trocterr, Zoraspin) ~
-# vvv WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
-# vvv Bestof = 2,
-# vvv fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
-# vvv
-# vvv Tol(p1)
+set.seed(111) # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ Bestof = 2, quasipoissonff, data = hspider, Crow1positive = FALSE)
+
+Tol(p1)
}
\keyword{models}
\keyword{regression}
diff --git a/man/VGAM-package.Rd b/man/VGAM-package.Rd
index e329b84..5f8890a 100644
--- a/man/VGAM-package.Rd
+++ b/man/VGAM-package.Rd
@@ -8,12 +8,13 @@ Vector Generalized Linear and Additive Models
\description{
\pkg{VGAM} provides functions for fitting vector generalized
linear and additive models (VGLMs and VGAMs), and associated
- models (Reduced-Rank VGLMs, Quadratic RR-VGLMs, Reduced-Rank
- VGAMs). This package fits many models and distributions by
+ models (Reduced-rank VGLMs, Quadratic RR-VGLMs, Reduced-rank
+ VGAMs). This package fits many models and distributions by
maximum likelihood estimation (MLE) or penalized MLE. Also fits
constrained ordination models in ecology such as constrained
quadratic ordination (CQO).
+
}
\details{
@@ -40,10 +41,16 @@ possible. VGLMs are limited only by the assumption that the
regression coefficients enter through a set of linear predictors.
The VGLM class is very large and encompasses a wide range of
multivariate response types and models, e.g., it includes
-univariate and multivariate distributions, categorical data analysis,
-time series, survival analysis, generalized estimating equations,
+univariate and multivariate distributions,
+categorical data analysis,
+time series,
+survival analysis,
+generalized estimating equations,
extreme values,
-correlated binary data, bioassay data and nonlinear least-squares
+correlated binary data,
+quantile and expectile regression,
+bioassay data and
+nonlinear least-squares
problems.
@@ -56,8 +63,11 @@ the covariates.
For a complete list of this package, use \code{library(help = "VGAM")}.
New \pkg{VGAM} family functions are continually being written and
added to the package.
-A monograph about VGLM and VGAMs etc. is in the making but unfortunately
-will not be finished for a while.
+A monograph about VGLM and VGAMs etc. is currently in the making.
+
+
+
+%but unfortunately will not be finished for a while.
%~~ An overview of how to use the package, including the most important ~~
@@ -72,6 +82,24 @@ Thomas W. Yee, \email{t.yee at auckland.ac.nz}.
Maintainer: Thomas Yee \email{t.yee at auckland.ac.nz}.
}
+
+\section{Warning}{
+ This package is undergoing continual development and improvement.
+ Until my monograph comes out and this package is released as version 1.0-0
+  the user should treat everything as subject to change.
+ This includes the family function names, many of the internals,
+ the use of link functions, and slot names.
+ Some future pain can be minimized by using good programming
+ techniques, e.g., using extractor/accessor functions such as
+ \code{coef()}, \code{weights()}, \code{vcov()},
+ \code{predict()}.
+ Nevertheless, please expect changes in all aspects of the package.
+
+
+
+}
+
+
\references{
@@ -122,13 +150,14 @@ The \code{VGAM} Package.
\emph{R News}, \bold{8}, 28--39.
-Documentation accompanying the \pkg{VGAM} package at
+(Oldish) documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee/VGAM}
-contains further information and examples.
+contains some further information and examples.
}
+
\keyword{ package }
\keyword{models}
\keyword{regression}
@@ -136,8 +165,11 @@ contains further information and examples.
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{rrvglm}},
+ \code{\link{cqo}},
\code{\link{TypicalVGAMfamilyFunction}},
- \code{\link{CommonVGAMffArguments}}.
+ \code{\link{CommonVGAMffArguments}},
+ \code{\link{Links}}.
+
%~~ Optional links to other man pages, e.g. ~~
@@ -145,10 +177,9 @@ contains further information and examples.
}
\examples{
# Example 1; proportional odds model
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
-fit at y # Sample proportions
-depvar(fit) # Better than using fit at y; dependent variable (response)
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+depvar(fit) # Better than using fit at y; dependent variable (response)
weights(fit, type = "prior") # Number of observations
coef(fit, matrix = TRUE) # p.179, in McCullagh and Nelder (1989)
constraints(fit) # Constraint matrices
@@ -156,22 +187,22 @@ summary(fit)
# Example 2; zero-inflated Poisson model
-zdata = data.frame(x2 = runif(nn <- 2000))
-zdata = transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
- lambda = loge( 0.5 + 2*x2, inverse = TRUE))
-zdata = transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0))
+zdata <- data.frame(x2 = runif(nn <- 2000))
+zdata <- transform(zdata, pstr0 = logit(-0.5 + 1*x2, inverse = TRUE),
+ lambda = loge( 0.5 + 2*x2, inverse = TRUE))
+zdata <- transform(zdata, y = rzipois(nn, lambda, pstr0 = pstr0))
with(zdata, table(y))
-fit = vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
+fit <- vglm(y ~ x2, zipoisson, zdata, trace = TRUE)
coef(fit, matrix = TRUE) # These should agree with the above values
# Example 3; fit a two species GAM simultaneously
-fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
+fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
binomialff(mv = TRUE), hunua)
coef(fit2, matrix = TRUE) # Not really interpretable
\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
-ooo = with(hunua, order(altitude))
+ooo <- with(hunua, order(altitude))
with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], type = "l", lwd = 2,
xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
main = "Two plant species' response curves", ylim = c(0, 0.8)))
@@ -179,7 +210,7 @@ with(hunua, rug(altitude)) }
# Example 4; LMS quantile regression
-fit = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), dat = bmi.nz,
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), dat = bmi.nz,
trace = TRUE)
head(predict(fit))
head(fitted(fit))
@@ -190,26 +221,26 @@ head(cdf(fit))
qtplot(fit, percentiles = c(5,50,90,99), main = "Quantiles", las = 1,
xlim = c(15, 90), ylab = "BMI", lwd = 2, lcol = 4) # Quantile plot
-ygrid = seq(15, 43, len = 100) # BMI ranges
+ygrid <- seq(15, 43, len = 100) # BMI ranges
par(mfrow = c(1, 1), lwd = 2) # Density plot
-aa = deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
+aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)")
aa
-aa = deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
-aa = deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
+aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
+aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
Attach = TRUE)
aa at post$deplot # Contains density function values
}
# Example 5; GEV distribution for extremes
-(fit = vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
+(fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE))
head(fitted(fit))
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
vcov(fit, untransform = TRUE)
-sqrt(diag(vcov(fit))) # Approximate standard errors
+sqrt(diag(vcov(fit))) # Approximate standard errors
\dontrun{ rlplot(fit) }
}
diff --git a/man/acat.Rd b/man/acat.Rd
index c2adf14..76c07c1 100644
--- a/man/acat.Rd
+++ b/man/acat.Rd
@@ -7,9 +7,8 @@
(preferably) factor response.
}
\usage{
-acat(link = "loge", earg = list(),
- parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
+acat(link = "loge", parallel = FALSE, reverse = FALSE,
+ zero = NULL, whitespace = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,11 +19,6 @@ acat(link = "loge", earg = list(),
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link function.
- See \code{\link{CommonVGAMffArguments}} for more information.
-
- }
\item{parallel}{
A logical, or formula specifying which terms have
equal/unequal coefficients.
@@ -39,16 +33,19 @@ acat(link = "loge", earg = list(),
\eqn{\eta_j = \log(P[Y=j]/P[Y=j+1])}{eta_j=log(P[Y=j]/P[Y=j+1])}
will be used.
+
}
\item{zero}{
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
The values must be from the set \{1,2,\ldots,\eqn{M}\}.
+
}
\item{whitespace}{
See \code{\link{CommonVGAMffArguments}} for information.
+
}
}
\details{
@@ -96,7 +93,7 @@ contains further information and examples.
\author{ Thomas W. Yee }
\note{
The response should be either a matrix of counts (with row sums that are
- all positive), or a factor. In both cases, the \code{y} slot returned
+ all positive), or an ordered factor. In both cases, the \code{y} slot returned
by \code{vglm}/\code{vgam}/\code{rrvglm} is the matrix of counts.
@@ -114,7 +111,8 @@ contains further information and examples.
}
\section{Warning }{
- No check is made to verify that the response is ordinal;
+ No check is made to verify that the response is ordinal if the
+ response is a matrix;
see \code{\link[base:factor]{ordered}}.
}
@@ -129,7 +127,7 @@ contains further information and examples.
}
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal,mild,severe) ~ let, acat, pneumo))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, acat, pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
model.matrix(fit)
diff --git a/man/alaplace3.Rd b/man/alaplace3.Rd
index ac327e6..77f27b3 100644
--- a/man/alaplace3.Rd
+++ b/man/alaplace3.Rd
@@ -11,21 +11,19 @@
}
\usage{
-alaplace1(tau = NULL, llocation = "identity", elocation = list(),
+alaplace1(tau = NULL, llocation = "identity",
ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
dfmu.init = 3, intparloc = FALSE, imethod = 1)
alaplace2(tau = NULL, llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
ilocation = NULL, iscale = NULL, kappa = sqrt(tau/(1 - tau)),
shrinkage.init = 0.95,
- parallelLocation = FALSE, digt = 4, sameScale = TRUE,
+ parallelLocation = FALSE, digt = 4, eq.scale = TRUE,
dfmu.init = 3, intparloc = FALSE,
imethod = 1, zero = -2)
alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
- elocation = list(), escale = list(), ekappa = list(),
ilocation = NULL, iscale = NULL, ikappa = 1,
imethod = 1, zero = 2:3)
}
@@ -52,11 +50,6 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
only works properly with the identity link.
}
- \item{elocation, escale, ekappa}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation, iscale, ikappa}{
Optional initial values.
If given, it must be numeric and values are recycled to the
@@ -73,9 +66,9 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
the argument \code{parallelLocation} applies to other terms.
}
- \item{sameScale}{ Logical.
+ \item{eq.scale}{ Logical.
Should the scale parameters be equal? It is advised
- to keep \code{sameScale = TRUE} unchanged because it
+ to keep \code{eq.scale = TRUE} unchanged because it
does not make sense to have different values for each
\code{tau} value.
@@ -287,10 +280,10 @@ alaplace3(llocation = "identity", lscale = "loge", lkappa = "loge",
\examples{
# Example 1: quantile regression with smoothing splines
-adata = data.frame(x = sort(runif(n <- 500)))
-mymu = function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2)
-adata = transform(adata, y = rpois(n, lambda = mymu(x)))
-mytau = c(0.25, 0.75); mydof = 4
+adata <- data.frame(x = sort(runif(n <- 500)))
+mymu <- function(x) exp(-2 + 6*sin(2*x-0.2) / (x+0.5)^2)
+adata <- transform(adata, y = rpois(n, lambda = mymu(x)))
+mytau <- c(0.25, 0.75); mydof <- 4
fit = vgam(y ~ s(x, df = mydof),
alaplace1(tau = mytau, llocation = "loge",
@@ -312,7 +305,7 @@ finexgrid = seq(0, 1, len = 1001)
for(ii in 1:length(mytau))
lines(finexgrid, qpois(p = mytau[ii], lambda = mymu(finexgrid)),
col = "blue", lwd = mylwd) }
-fit at extra # Contains useful information
+fit at extra # Contains useful information
# Example 2: regression quantile at a new tau value from an existing fit
diff --git a/man/alaplaceUC.Rd b/man/alaplaceUC.Rd
index 6061f22..ec59d62 100644
--- a/man/alaplaceUC.Rd
+++ b/man/alaplaceUC.Rd
@@ -111,7 +111,7 @@ abline(h = 0, lty = 2) }
pp = seq(0.05, 0.95, by = 0.05) # Test two functions
max(abs(palap(qalap(pp, loc, sigma, kappa = kappa),
- loc, sigma, kappa = kappa) - pp)) # Should be 0
+ loc, sigma, kappa = kappa) - pp)) # Should be 0
}
\keyword{distribution}
diff --git a/man/amh.Rd b/man/amh.Rd
index 10cc584..7abc587 100644
--- a/man/amh.Rd
+++ b/man/amh.Rd
@@ -9,8 +9,7 @@
}
\usage{
-amh(lalpha = "rhobit", ealpha = list(), ialpha = NULL,
- imethod = 1, nsimEIM = 250)
+amh(lalpha = "rhobit", ialpha = NULL, imethod = 1, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,11 +20,6 @@ amh(lalpha = "rhobit", ealpha = list(), ialpha = NULL,
See \code{\link{Links}} for more choices.
}
- \item{ealpha}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ialpha}{
Numeric. Optional initial value for \eqn{\alpha}{alpha}.
By default, an initial value is chosen internally.
diff --git a/man/amhUC.Rd b/man/amhUC.Rd
index b9fb224..167eee5 100644
--- a/man/amhUC.Rd
+++ b/man/amhUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-damh(x1, x2, alpha, log=FALSE)
+damh(x1, x2, alpha, log = FALSE)
pamh(q1, q2, alpha)
ramh(n, alpha)
}
diff --git a/man/amlbinomial.Rd b/man/amlbinomial.Rd
index ba5849c..7edc971 100644
--- a/man/amlbinomial.Rd
+++ b/man/amlbinomial.Rd
@@ -8,8 +8,7 @@
}
\usage{
-amlbinomial(w.aml = 1, parallel = FALSE, digw = 4,
- link = "logit", earg = list())
+amlbinomial(w.aml = 1, parallel = FALSE, digw = 4, link = "logit")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -37,7 +36,7 @@ amlbinomial(w.aml = 1, parallel = FALSE, digw = 4,
used cosmetically for labelling.
}
- \item{link, earg}{
+ \item{link}{
See \code{\link{binomialff}}.
}
diff --git a/man/amlexponential.Rd b/man/amlexponential.Rd
index 49ebd6b..dbbe289 100644
--- a/man/amlexponential.Rd
+++ b/man/amlexponential.Rd
@@ -9,7 +9,7 @@
}
\usage{
amlexponential(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
- link = "loge", earg = list())
+ link = "loge")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -42,7 +42,7 @@ amlexponential(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
used cosmetically for labelling.
}
- \item{link, earg}{
+ \item{link}{
See \code{\link{exponential}} and the warning below.
}
diff --git a/man/amlnormal.Rd b/man/amlnormal.Rd
index 12ea2fe..761f945 100644
--- a/man/amlnormal.Rd
+++ b/man/amlnormal.Rd
@@ -12,7 +12,7 @@
}
\usage{
amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
- eexpectile = list(), iexpectile = NULL, imethod = 1, digw = 4)
+ iexpectile = NULL, imethod = 1, digw = 4)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -34,7 +34,7 @@ amlnormal(w.aml = 1, parallel = FALSE, lexpectile = "identity",
See \code{\link{CommonVGAMffArguments}} for more information.
}
- \item{lexpectile, eexpectile, iexpectile}{
+ \item{lexpectile, iexpectile}{
See \code{\link{CommonVGAMffArguments}} for more information.
}
diff --git a/man/amlpoisson.Rd b/man/amlpoisson.Rd
index 285fc5b..e00ed12 100644
--- a/man/amlpoisson.Rd
+++ b/man/amlpoisson.Rd
@@ -9,7 +9,7 @@
}
\usage{
amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
- link = "loge", earg = list())
+ link = "loge")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -42,7 +42,7 @@ amlpoisson(w.aml = 1, parallel = FALSE, imethod = 1, digw = 4,
used cosmetically for labelling.
}
- \item{link, earg}{
+ \item{link}{
See \code{\link{poissonff}}.
}
diff --git a/man/backPain.Rd b/man/backPain.Rd
index 210d03e..5299087 100644
--- a/man/backPain.Rd
+++ b/man/backPain.Rd
@@ -6,6 +6,7 @@
Data from a study of patients suffering from back pain. Prognostic
variables were recorded at presentation and progress was categorised
three weeks after treatment.
+
}
\usage{data(backPain)}
\format{
@@ -23,11 +24,13 @@
\source{
\url{http://ideas.repec.org/c/boc/bocode/s419001.html}
+
The data set and this help file was copied from \pkg{gnm}
so that a vignette in \pkg{VGAM} could be run; the analysis is
described in Yee (2010).
+
}
\references{
Anderson, J. A. (1984) Regression and Ordered Categorical
diff --git a/man/benini.Rd b/man/benini.Rd
index 2e0e17c..8980f99 100644
--- a/man/benini.Rd
+++ b/man/benini.Rd
@@ -3,13 +3,13 @@
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Benini Distribution Family Function }
\description{
- Estimating the parameter of the Benini distribution by maximum
+ Estimating the 1-parameter Benini distribution by maximum
likelihood estimation.
}
\usage{
-benini(y0 = stop("argument 'y0' must be specified"),
- lshape = "loge", earg = list(), ishape = NULL, imethod = 1)
+benini(y0 = stop("argument 'y0' must be specified"), lshape = "loge",
+ ishape = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,26 +18,19 @@ benini(y0 = stop("argument 'y0' must be specified"),
}
\item{lshape}{
- Parameter link function applied to the parameter \eqn{b},
+ Parameter link function and extra argument of the parameter \eqn{b},
which is the shape parameter.
See \code{\link{Links}} for more choices.
A log link is the default because \eqn{b} is positive.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape}{
Optional initial value for the shape parameter.
The default is to compute the value internally.
}
- \item{imethod}{
- An integer with value \code{1} or \code{2} which
- specifies the initialization method. If failure to converge occurs
- try the other value, or else specify a value for \code{ishape}.
+ \item{imethod, zero}{
+ Details at \code{\link{CommonVGAMffArguments}}.
}
}
@@ -46,27 +39,35 @@ benini(y0 = stop("argument 'y0' must be specified"),
has a probability density function that can be written
\deqn{f(y) = 2 b \exp(-b[(\log(y/y_0))^2]) \log(y/y_0) / y }{%
f(y) = 2*b*exp(-b * [(log(y/y0))^2]) * log(y/y0) / y}
- for \eqn{0 < y_0 < y}{0<y0<y}, and \eqn{b>0}.
+ for \eqn{0 < y_0 < y}{0 < y0 < y}, and \eqn{b > 0}.
The cumulative distribution function for \eqn{Y} is
\deqn{F(y) = 1 - \exp(-b[(\log(y/y_0))^2]).}{%
- F(y) = 1 - exp(-b * [(log(y/y0))^2]). }
+ F(y) = 1 - exp(-b * [(log(y / y0))^2]). }
Here, Newton-Raphson and Fisher scoring coincide.
+ The median of \eqn{Y} is now returned as the fitted values.
+ This \pkg{VGAM} family function can handle a multiple
+ responses, which is inputted as a matrix.
On fitting, the \code{extra} slot has a component called \code{y0} which
contains the value of the \code{y0} argument.
-}
-\section{Warning}{
- The mean of \eqn{Y}, which are returned as the fitted values,
- may be incorrect.
}
+%\section{Warning}{
+%
+%
+% The median of \eqn{Y}, which are returned as the fitted values,
+% may be incorrect.
+%
+%
+%}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
@@ -74,6 +75,7 @@ Kleiber, C. and Kotz, S. (2003)
\emph{Statistical Size Distributions in Economics and Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\note{
@@ -81,20 +83,21 @@ Hoboken, NJ, USA: Wiley-Interscience.
as well, and the 3-parameter Benini distribution estimates another
shape parameter \eqn{a}{a} too.
+
}
\seealso{
- \code{\link{Benini}}.
+ \code{\link{Benini}}.
+
}
\examples{
-y0 = 1
-bdata = data.frame(y = rbenini(n = 3000, y0 = y0, shape = exp(2)))
-fit = vglm(y ~ 1, benini(y0 = y0), bdata, trace = TRUE, crit = "coef")
+y0 <- 1; nn <- 3000
+bdata <- data.frame(y = rbenini(nn, y0 = y0, shape = exp(2)))
+fit <- vglm(y ~ 1, benini(y0 = y0), bdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
fit at extra$y0
-head(fitted(fit), 1) # Apparent discrepancy:
-with(bdata, mean(y))
+c(head(fitted(fit), 1), with(bdata, median(y))) # Should be equal
}
\keyword{models}
\keyword{regression}
diff --git a/man/beta.ab.Rd b/man/beta.ab.Rd
index 75f3b48..83f321e 100644
--- a/man/beta.ab.Rd
+++ b/man/beta.ab.Rd
@@ -9,7 +9,6 @@
}
\usage{
beta.ab(lshape1 = "loge", lshape2 = "loge",
- eshape1 = list(), eshape2 = list(),
i1 = NULL, i2 = NULL, trim = 0.05,
A = 0, B = 1, parallel = FALSE, zero = NULL)
}
@@ -21,11 +20,6 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
The log link (defaults) ensures that the parameters are positive.
}
- \item{eshape1, eshape2}{
- List. Extra argument for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{i1, i2}{
Initial value for the first and second shape parameters respectively.
A \code{NULL} value means it is obtained in the \code{initialize} slot.
@@ -79,8 +73,10 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
and a precision parameter is implemented in \code{\link{betaff}}.
- If \eqn{A} and \eqn{B} are unknown, then the \pkg{VGAM} family function
- \code{beta4()} can be used to estimate these too.
+% 20120525:
+% Regularity conditions not satisfied; support depends on the parameters:
+% If \eqn{A} and \eqn{B} are unknown, then the \pkg{VGAM} family function
+% \code{beta4()} can be used to estimate these too.
}
@@ -131,8 +127,7 @@ beta.ab(lshape1 = "loge", lshape2 = "loge",
\code{\link{betaprime}},
\code{\link{rbetageom}},
\code{\link{rbetanorm}},
- \code{\link{kumar}},
- \code{beta4}.
+ \code{\link{kumar}}.
}
\examples{
@@ -152,8 +147,8 @@ c(meanY = with(bdata, mean(Y)), head(fitted(fit),2))
\keyword{regression}
% 3/1/06; this works well:
-% fit = vglm(y~1, beta.abqn(link = logoff,earg = list(offset = 1)), tr = TRUE, cri = "c")
+% fit = vglm(y~1, beta.abqn(link = logoff(offset = 1), tr = TRUE, cri = "c")
% 3/1/06; this does not work so well:
-% it = vglm(y~1, beta.abqn(link = logoff,earg = list(offset = 0)), tr = TRUE, cri = "c")
+% it = vglm(y~1, beta.abqn(link = logoff(offset = 0), tr = TRUE, cri = "c")
% Interesting!!
diff --git a/man/betaII.Rd b/man/betaII.Rd
index ad80196..b1528e6 100644
--- a/man/betaII.Rd
+++ b/man/betaII.Rd
@@ -8,7 +8,6 @@
}
\usage{
betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
- escale = list(), eshape2.p = list(), eshape3.q = list(),
iscale = NULL, ishape2.p = 2, ishape3.q = 2, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +18,6 @@ betaII(lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
See \code{\link{Links}} for more choices.
}
- \item{escale, eshape2.p, eshape3.q}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, ishape2.p, ishape3.q}{
Optional initial values for \code{scale}, \code{p} and \code{q}.
diff --git a/man/betabinomial.Rd b/man/betabinomial.Rd
index 1a9426f..1483e75 100644
--- a/man/betabinomial.Rd
+++ b/man/betabinomial.Rd
@@ -8,7 +8,7 @@
}
\usage{
-betabinomial(lmu = "logit", lrho = "logit", emu = list(), erho = list(),
+betabinomial(lmu = "logit", lrho = "logit",
irho = NULL, imethod = 1, shrinkage.init = 0.95,
nsimEIM = NULL, zero = 2)
}
@@ -21,11 +21,6 @@ betabinomial(lmu = "logit", lrho = "logit", emu = list(), erho = list(),
however, see the warning below.
}
- \item{emu, erho}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{irho}{
Optional initial value for the correlation parameter.
If given, it must be in \eqn{(0,1)}, and is recyled to the necessary
diff --git a/man/betabinomial.ab.Rd b/man/betabinomial.ab.Rd
index 3976150..aed6c94 100644
--- a/man/betabinomial.ab.Rd
+++ b/man/betabinomial.ab.Rd
@@ -9,7 +9,7 @@
}
\usage{
-betabinomial.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
+betabinomial.ab(lshape12 = "loge", i1 = 1, i2 = NULL,
imethod = 1, shrinkage.init = 0.95, nsimEIM = NULL,
zero = NULL)
}
@@ -21,11 +21,6 @@ betabinomial.ab(lshape12 = "loge", earg = list(), i1 = 1, i2 = NULL,
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{i1, i2}{
Initial value for the shape parameters.
The first must be positive, and is recyled to the necessary length.
diff --git a/man/betaff.Rd b/man/betaff.Rd
index 8b04748..5fe5dbb 100644
--- a/man/betaff.Rd
+++ b/man/betaff.Rd
@@ -7,39 +7,43 @@
}
\usage{
-betaff(A = 0, B = 1,
- lmu = if (A == 0 & B == 1) "logit" else "elogit", lphi = "loge",
- emu = if (lmu == "elogit") list(min = A, max = B) else list(),
- ephi = list(), imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
+betaff(A = 0, B = 1, lmu = "logit", lphi = "loge",
+ imu = NULL, iphi = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
+
+
\item{A, B}{
Lower and upper limits of the distribution.
The defaults correspond to the \emph{standard beta distribution}
where the response lies between 0 and 1.
-
}
+
+
\item{lmu, lphi}{
Link function for the mean and precision parameters.
- See below for more details.
- See \code{\link{Links}} for more choices.
+ The values \eqn{A} and \eqn{B} are extracted from the
+ \code{min} and \code{max} arguments of \code{\link{elogit}}.
+ Consequently, only \code{\link{elogit}} is allowed.
+
+
+% See below for more details.
+% See \code{\link{Links}} for more choices.
- }
- \item{emu, ephi}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{imu, iphi}{
Optional initial value for the mean and precision parameters
- respectively. A \code{NULL} value means a value is obtained in the
+ respectively. A \code{NULL} value means a value is obtained in the
\code{initialize} slot.
+
}
\item{imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -60,9 +64,11 @@ betaff(A = 0, B = 1,
Also, \eqn{\phi}{phi} is positive and \eqn{A < \mu < B}{A < mu < B}.
Here, the limits \eqn{A} and \eqn{B} are \emph{known}.
+
Another parameterization of the beta distribution involving the raw
shape parameters is implemented in \code{\link{beta.ab}}.
+
For general \eqn{A} and \eqn{B}, the variance of \eqn{Y} is
\eqn{(B-A)^2 \times \mu_1 \times (1-\mu_1) / (1+\phi)}{(B-A)^2 *
mu1 * (1-mu1) / (1+phi)}.
@@ -71,10 +77,13 @@ betaff(A = 0, B = 1,
\eqn{\phi}{phi}, the smaller the variance of \eqn{Y}.
Also, \eqn{\mu_1 = shape1/(shape1+shape2)}{mu1=shape1/(shape1+shape2)} and
\eqn{\phi = shape1+shape2}{phi = shape1+shape2}.
-
Fisher scoring is implemented.
- If \eqn{A} and \eqn{B} are unknown then the \pkg{VGAM} family function
- \code{beta4()} can be used to estimate these too.
+
+
+% If \eqn{A} and \eqn{B} are unknown then the \pkg{VGAM} family function
+% \code{beta4()} can be used to estimate these too.
+
+
}
\value{
@@ -82,6 +91,7 @@ betaff(A = 0, B = 1,
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Ferrari, S. L. P. and Francisco C.-N. (2004)
@@ -89,14 +99,20 @@ betaff(A = 0, B = 1,
\emph{Journal of Applied Statistics},
\bold{31}, 799--815.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
The response must have values in the interval (\eqn{A}, \eqn{B}).
+ The user currently needs to manually choose \code{lmu} to match
+ the input of arguments \code{A} and \code{B}, e.g.,
+ with \code{\link{elogit}}; see the example below.
+
}
@@ -111,25 +127,27 @@ betaff(A = 0, B = 1,
\code{\link{rbetageom}},
\code{\link{rbetanorm}},
\code{\link{kumar}},
- \code{beta4},
\code{\link{elogit}}.
+
}
\examples{
-bdata = data.frame(y = rbeta(nn <- 1000, shape1 = exp(0), shape2 = exp(1)))
-fit = vglm(y ~ 1, betaff, bdata, trace = TRUE)
-coef(fit, matrix = TRUE)
-Coef(fit) # Useful for intercept-only models
+bdata <- data.frame(y = rbeta(nn <- 1000, shape1 = exp(0), shape2 = exp(1)))
+fit1 <- vglm(y ~ 1, betaff, bdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1) # Useful for intercept-only models
# General A and B, and with a covariate
-bdata = transform(bdata, x2 = runif(nn))
-bdata = transform(bdata, mu = logit(0.5 - x2, inverse = TRUE),
- prec = exp(3 + x2)) # prec == phi
-bdata = transform(bdata, shape2 = prec * (1-mu),
+bdata <- transform(bdata, x2 = runif(nn))
+bdata <- transform(bdata, mu = logit(0.5 - x2, inverse = TRUE),
+ prec = exp(3.0 + x2)) # prec == phi
+bdata <- transform(bdata, shape2 = prec * (1 - mu),
shape1 = mu * prec)
-bdata = transform(bdata, y = rbeta(nn, shape1 = shape1, shape2 = shape2))
-bdata = transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
-fit = vglm(Y ~ x2, betaff(A = 5, B = 13), bdata, trace = TRUE)
+bdata <- transform(bdata,
+ y = rbeta(nn, shape1 = shape1, shape2 = shape2))
+bdata <- transform(bdata, Y = 5 + 8 * y) # From 5 to 13, not 0 to 1
+fit <- vglm(Y ~ x2, data = bdata, trace = TRUE,
+ betaff(A = 5, B = 13, lmu = elogit(min = 5, max = 13)))
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/betageometric.Rd b/man/betageometric.Rd
index 8ecd244..a4f2364 100644
--- a/man/betageometric.Rd
+++ b/man/betageometric.Rd
@@ -8,7 +8,6 @@
}
\usage{
betageometric(lprob = "logit", lshape = "loge",
- eprob = list(), eshape = list(),
iprob = NULL, ishape = 0.1,
moreSummation=c(2,100), tolerance=1.0e-10, zero=NULL)
}
@@ -22,11 +21,6 @@ betageometric(lprob = "logit", lshape = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eprob, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iprob, ishape}{
Numeric.
Initial values for the two parameters.
diff --git a/man/betanormUC.Rd b/man/betanormUC.Rd
index 1feb3b8..bccce6d 100644
--- a/man/betanormUC.Rd
+++ b/man/betanormUC.Rd
@@ -11,10 +11,11 @@
}
\usage{
-dbetanorm(x, shape1, shape2, mean=0, sd=1, log=FALSE)
-pbetanorm(q, shape1, shape2, mean=0, sd=1, lower.tail=TRUE, log.p=FALSE)
-qbetanorm(p, shape1, shape2, mean=0, sd=1)
-rbetanorm(n, shape1, shape2, mean=0, sd=1)
+dbetanorm(x, shape1, shape2, mean = 0, sd = 1, log = FALSE)
+pbetanorm(q, shape1, shape2, mean = 0, sd = 1,
+ lower.tail = TRUE, log.p = FALSE)
+qbetanorm(p, shape1, shape2, mean = 0, sd = 1)
+rbetanorm(n, shape1, shape2, mean = 0, sd = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
diff --git a/man/betaprime.Rd b/man/betaprime.Rd
index d9a3d3a..b804404 100644
--- a/man/betaprime.Rd
+++ b/man/betaprime.Rd
@@ -8,7 +8,7 @@
}
\usage{
-betaprime(link = "loge", earg=list(), i1 = 2, i2 = NULL, zero = NULL)
+betaprime(link = "loge", i1 = 2, i2 = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ betaprime(link = "loge", earg=list(), i1 = 2, i2 = NULL, zero = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{i1, i2}{
Initial values for the first and second shape parameters.
A \code{NULL} value means it is obtained in the \code{initialize} slot.
@@ -51,6 +46,7 @@ betaprime(link = "loge", earg=list(), i1 = 2, i2 = NULL, zero = NULL)
here, \eqn{B} is the beta function.
The mean of \eqn{Y} is \eqn{shape1 / (shape2-1)} provided \eqn{shape2>1}.
+
If \eqn{Y} has a \eqn{Beta(shape1,shape2)} distribution then
\eqn{Y/(1-Y)} and \eqn{(1-Y)/Y} have a \eqn{Betaprime(shape1,shape2)}
and \eqn{Betaprime(shape2,shape1)} distribution respectively.
@@ -59,6 +55,7 @@ betaprime(link = "loge", earg=list(), i1 = 2, i2 = NULL, zero = NULL)
then \eqn{Y_1/Y_2}{Y1/Y2} has a \eqn{Betaprime(shape1,shape2)}
distribution.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -66,10 +63,12 @@ betaprime(link = "loge", earg=list(), i1 = 2, i2 = NULL, zero = NULL)
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
%% zz not sure about the JKB reference.
\references{
+
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
Chapter 25 of:
\emph{Continuous Univariate Distributions},
@@ -77,45 +76,50 @@ Chapter 25 of:
Volume 2,
New York: Wiley.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
The response must have positive values only.
+
The beta-prime distribution is also known as the
\emph{beta distribution of the second kind} or the
\emph{inverted beta distribution}.
+
}
\seealso{
\code{\link{betaff}}.
+
}
\examples{
-nn = 1000
-betadat = data.frame(shape1 = exp(1), shape2 = exp(3))
-betadat = transform(betadat, yb = rbeta(nn, shape1, shape2))
-betadat = transform(betadat, y1 = (1-yb)/yb, y2 = yb/(1-yb),
+nn <- 1000
+bdata <- data.frame(shape1 = exp(1), shape2 = exp(3))
+bdata <- transform(bdata, yb = rbeta(nn, shape1, shape2))
+bdata <- transform(bdata, y1 = (1-yb)/yb, y2 = yb/(1-yb),
y3 = rgamma(nn, exp(3)) / rgamma(nn, exp(2)))
-fit1 = vglm(y1 ~ 1, betaprime, betadat, trace=TRUE)
-coef(fit1, matrix=TRUE)
+fit1 <- vglm(y1 ~ 1, betaprime, bdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
-fit2 = vglm(y2 ~ 1, betaprime, betadat, trace=TRUE)
-coef(fit2, matrix=TRUE)
+fit2 <- vglm(y2 ~ 1, betaprime, bdata, trace = TRUE)
+coef(fit2, matrix = TRUE)
-fit3 = vglm(y3 ~ 1, betaprime, betadat, trace=TRUE)
-coef(fit3, matrix=TRUE)
+fit3 <- vglm(y3 ~ 1, betaprime, bdata, trace = TRUE)
+coef(fit3, matrix = TRUE)
# Compare the fitted values
-with(betadat, mean(y3))
+with(bdata, mean(y3))
head(fitted(fit3))
-Coef(fit3) # Useful for intercept-only models
+Coef(fit3) # Useful for intercept-only models
}
\keyword{models}
\keyword{regression}
diff --git a/man/bilogistic4.Rd b/man/bilogistic4.Rd
index 6a22731..b21bd0f 100644
--- a/man/bilogistic4.Rd
+++ b/man/bilogistic4.Rd
@@ -121,9 +121,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-ymat = rbilogis4(n <- 1000, loc1 = 5, loc2 = 7, scale2 = exp(1))
+ymat <- rbilogis4(n <- 1000, loc1 = 5, loc2 = 7, scale2 = exp(1))
\dontrun{plot(ymat)}
-fit = vglm(ymat ~ 1, fam = bilogistic4, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = bilogistic4, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/binom2.or.Rd b/man/binom2.or.Rd
index fd86249..89c02fe 100644
--- a/man/binom2.or.Rd
+++ b/man/binom2.or.Rd
@@ -12,7 +12,6 @@
}
\usage{
binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
- emu = list(), emu1 = emu, emu2 = emu, eoratio = list(),
imu1 = NULL, imu2 = NULL, ioratio = NULL, zero = 3,
exchangeable = FALSE, tol = 0.001, morerobust = FALSE)
}
@@ -41,11 +40,6 @@ binom2.or(lmu = "logit", lmu1 = lmu, lmu2 = lmu, loratio = "loge",
arguments if convergence failure occurs.
}
- \item{emu, emu1, emu2, eoratio}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{zero}{
Which linear/additive predictor is modelled as an intercept only? A
\code{NULL} means none.
diff --git a/man/binom2.rho.Rd b/man/binom2.rho.Rd
index cf2e4ae..d798ba0 100644
--- a/man/binom2.rho.Rd
+++ b/man/binom2.rho.Rd
@@ -9,7 +9,7 @@
}
\usage{
-binom2.rho(lrho = "rhobit", erho=list(), imu1 = NULL, imu2 = NULL,
+binom2.rho(lrho = "rhobit", lmu = "probit", imu1 = NULL, imu2 = NULL,
irho = NULL, imethod = 1,
zero = 3, exchangeable = FALSE, nsimEIM = NULL)
binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
@@ -22,9 +22,10 @@ binom2.Rho(rho = 0, imu1 = NULL, imu2 = NULL,
See \code{\link{Links}} for more choices.
}
- \item{erho}{
- List. Extra argument for the \code{lrho} link.
- See \code{earg} in \code{\link{Links}} for general information.
+ \item{lmu}{
+ Link function applied to the marginal probabilities.
+ Should be left alone.
+
}
\item{irho}{
diff --git a/man/binom2.rhoUC.Rd b/man/binom2.rhoUC.Rd
index 1074114..74af690 100644
--- a/man/binom2.rhoUC.Rd
+++ b/man/binom2.rhoUC.Rd
@@ -11,14 +11,14 @@
}
\usage{
rbinom2.rho(n, mu1,
- mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
- rho=0, exchangeable=FALSE, twoCols=TRUE,
- colnames=if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
- ErrorCheck=TRUE)
+ mu2 = if(exchangeable) mu1 else stop("argument 'mu2' not specified"),
+ rho = 0, exchangeable = FALSE, twoCols = TRUE,
+ colnames = if(twoCols) c("y1","y2") else c("00", "01", "10", "11"),
+ ErrorCheck = TRUE)
dbinom2.rho(mu1,
- mu2=if(exchangeable) mu1 else stop("'mu2' not specified"),
- rho=0, exchangeable=FALSE,
- colnames=c("00", "01", "10", "11"), ErrorCheck=TRUE)
+ mu2 = if(exchangeable) mu1 else stop("'mu2' not specified"),
+ rho = 0, exchangeable = FALSE,
+ colnames = c("00", "01", "10", "11"), ErrorCheck = TRUE)
}
%- maybe also 'usage' for other objects documented here.
@@ -31,7 +31,7 @@ dbinom2.rho(mu1,
}
\item{mu1, mu2}{
The marginal probabilities.
- Only \code{mu1} is needed if \code{exchangeable=TRUE}.
+ Only \code{mu1} is needed if \code{exchangeable = TRUE}.
Values should be between 0 and 1.
}
@@ -94,19 +94,19 @@ dbinom2.rho(mu1,
# Example 1
(myrho <- rhobit(2, inverse = TRUE))
ymat = rbinom2.rho(nn <- 2000, mu1 = 0.8, rho = myrho, exch = TRUE)
-(mytab = table(ymat[,1], ymat[,2], dnn = c("Y1","Y2")))
+(mytab = table(ymat[, 1], ymat[, 2], dnn = c("Y1", "Y2")))
fit = vglm(ymat ~ 1, binom2.rho(exch = TRUE))
coef(fit, matrix = TRUE)
# Example 2
-bdata = data.frame(x = sort(runif(nn)))
-bdata = transform(bdata, mu1 = probit(-2+4*x, inverse = TRUE),
- mu2 = probit(-1+3*x, inverse = TRUE))
+bdata = data.frame(x2 = sort(runif(nn)))
+bdata = transform(bdata, mu1 = probit(-2+4*x2, inverse = TRUE),
+ mu2 = probit(-1+3*x2, inverse = TRUE))
dmat = with(bdata, dbinom2.rho(mu1, mu2, myrho))
ymat = with(bdata, rbinom2.rho(nn, mu1, mu2, myrho))
-fit2 = vglm(ymat ~ x, binom2.rho, bdata)
+fit2 = vglm(ymat ~ x2, binom2.rho, bdata)
coef(fit2, matrix = TRUE)
-\dontrun{ matplot(with(bdata, x), dmat, lty = 1:4, col = 1:4,
+\dontrun{ matplot(with(bdata, x2), dmat, lty = 1:4, col = 1:4,
type = "l", main = "Joint probabilities",
ylim = 0:1, lwd = 2, ylab = "Probability")
legend(x = 0.25, y = 0.9, lty = 1:4, col = 1:4, lwd = 2,
diff --git a/man/binomialff.Rd b/man/binomialff.Rd
index 0d714b3..a79ba5d 100644
--- a/man/binomialff.Rd
+++ b/man/binomialff.Rd
@@ -9,16 +9,17 @@
}
\usage{
-binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
- onedpar = !mv, parallel = FALSE, zero = NULL)
+binomialff(link = "logit", dispersion = 1, mv = FALSE,
+ onedpar = !mv, parallel = FALSE, zero = NULL, bred = FALSE,
+ earg.link = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument optionally used by the link function.
- See \code{\link{Links}} for more choices, and also
+ \item{link}{
+ Link function;
+ see \code{\link{Links}} and
\code{\link{CommonVGAMffArguments}} for more information.
@@ -65,6 +66,12 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
are modelled as intercepts only. The values must be from the set
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
matrix response.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+ }
+ \item{bred, earg.link}{
+ Details at \code{\link{CommonVGAMffArguments}}.
}
@@ -205,8 +212,8 @@ binomialff(link = "logit", earg = list(), dispersion = 1, mv = FALSE,
quasibinomialff()
quasibinomialff(link = "probit")
-shunua = hunua[sort.list(with(hunua, altitude)), ] # Sort by altitude
-fit = vglm(agaaus ~ poly(altitude, 2), binomialff(link = cloglog), shunua)
+shunua <- hunua[sort.list(with(hunua, altitude)), ] # Sort by altitude
+fit <- vglm(agaaus ~ poly(altitude, 2), binomialff(link = cloglog), shunua)
\dontrun{
plot(agaaus ~ jitter(altitude), shunua, col = "blue", ylab = "P(Agaaus = 1)",
main = "Presence/absence of Agathis australis", las = 1)
@@ -214,20 +221,20 @@ with(shunua, lines(altitude, fitted(fit), col = "orange", lwd = 2)) }
# Fit two species simultaneously
-fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(mv = TRUE), shunua)
+fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude), binomialff(mv = TRUE), shunua)
with(shunua, matplot(altitude, fitted(fit2), type = "l",
main = "Two species response curves", las = 1))
# Shows that Fisher scoring can sometime fail. See Ridout (1990).
-ridout = data.frame(v = c(1000, 100, 10), r = c(4, 3, 3), n = c(5, 5, 5))
-(ridout = transform(ridout, logv = log(v)))
+ridout <- data.frame(v = c(1000, 100, 10), r = c(4, 3, 3), n = c(5, 5, 5))
+(ridout <- transform(ridout, logv = log(v)))
# The iterations oscillates between two local solutions:
-glm.fail = glm(r / n ~ offset(logv) + 1, weight = n,
- binomial(link = cloglog), ridout, trace = TRUE)
+glm.fail <- glm(r / n ~ offset(logv) + 1, weight = n,
+ binomial(link = 'cloglog'), ridout, trace = TRUE)
coef(glm.fail)
# vglm()'s half-stepping ensures the MLE of -5.4007 is obtained:
-vglm.ok = vglm(cbind(r, n-r) ~ offset(logv) + 1,
+vglm.ok <- vglm(cbind(r, n-r) ~ offset(logv) + 1,
binomialff(link = cloglog), ridout, trace = TRUE)
coef(vglm.ok)
}
diff --git a/man/binormal.Rd b/man/binormal.Rd
index 42aee8d..8d8e0c6 100644
--- a/man/binormal.Rd
+++ b/man/binormal.Rd
@@ -8,15 +8,13 @@
}
\usage{
-binormal(lmean1 = "identity", emean1 = list(),
- lmean2 = "identity", emean2 = list(),
- lsd1 = "loge", esd1 = list(),
- lsd2 = "loge", esd2 = list(),
- lrho = "rhobit", erho = list(),
+binormal(lmean1 = "identity", lmean2 = "identity",
+ lsd1 = "loge", lsd2 = "loge",
+ lrho = "rhobit",
imean1 = NULL, imean2 = NULL,
isd1 = NULL, isd2 = NULL,
irho = NULL, imethod = 1,
- equalmean = FALSE, equalsd = FALSE,
+ eq.mean = FALSE, eq.sd = FALSE,
zero = 3:5)
@@ -31,16 +29,11 @@ binormal(lmean1 = "identity", emean1 = list(),
standard deviations.
}
- \item{emean1, emean2, esd1, esd2, erho}{
- List. Extra argument for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{imean1, imean2, isd1, isd2, irho, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
}
- \item{equalmean, equalsd}{
+ \item{eq.mean, eq.sd}{
Logical or formula.
Constrains the means or the standard deviations to be equal.
Only one of these arguments may be assigned a value.
@@ -61,6 +54,7 @@ binormal(lmean1 = "identity", emean1 = list(),
the form of a two-column matrix.
Fisher scoring is implemented.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -89,14 +83,15 @@ binormal(lmean1 = "identity", emean1 = list(),
\code{\link{normal1}},
\code{\link{gaussianff}}.
+
}
\examples{
nn <- 1000
-mydat = data.frame(x2 = runif(nn), x3 = runif(nn))
-mydat = transform(mydat, y1 = rnorm(nn, 1 + 2*x2),
- y2 = rnorm(nn, 3 + 4*x2))
-fit1 = vglm(cbind(y1, y2) ~ x2,
- binormal(equalsd = TRUE), data = mydat, trace = TRUE)
+bdata <- data.frame(x2 = runif(nn), x3 = runif(nn))
+bdata <- transform(bdata, y1 = rnorm(nn, 1 + 2*x2),
+ y2 = rnorm(nn, 3 + 4*x2))
+fit1 <- vglm(cbind(y1, y2) ~ x2,
+ binormal(eq.sd = TRUE), data = bdata, trace = TRUE)
coef(fit1, matrix = TRUE)
constraints(fit1)
summary(fit1)
diff --git a/man/bisa.Rd b/man/bisa.Rd
index 55e24c5..a670c8b 100644
--- a/man/bisa.Rd
+++ b/man/bisa.Rd
@@ -9,7 +9,6 @@
}
\usage{
bisa(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
ishape = NULL, iscale = 1, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -21,11 +20,6 @@ bisa(lshape = "loge", lscale = "loge",
A log link is the default for both because they are positive.
}
- \item{escale, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, ishape}{
Initial values for \eqn{a} and \eqn{b}.
A \code{NULL} means an initial value is chosen internally using
diff --git a/man/bisaUC.Rd b/man/bisaUC.Rd
index 4819546..cf8e797 100644
--- a/man/bisaUC.Rd
+++ b/man/bisaUC.Rd
@@ -11,10 +11,10 @@
}
\usage{
-dbisa(x, shape, scale=1, log=FALSE)
-pbisa(q, shape, scale=1)
-qbisa(p, shape, scale=1)
-rbisa(n, shape, scale=1)
+dbisa(x, shape, scale = 1, log = FALSE)
+pbisa(q, shape, scale = 1)
+qbisa(p, shape, scale = 1)
+rbisa(n, shape, scale = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
diff --git a/man/borel.tanner.Rd b/man/borel.tanner.Rd
index 2a0fc39..de04958 100644
--- a/man/borel.tanner.Rd
+++ b/man/borel.tanner.Rd
@@ -8,7 +8,7 @@
}
\usage{
-borel.tanner(Qsize = 1, link = "logit", earg = list(), imethod = 1)
+borel.tanner(Qsize = 1, link = "logit", imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,9 +17,9 @@ borel.tanner(Qsize = 1, link = "logit", earg = list(), imethod = 1)
queue size.
}
- \item{link, earg}{
- Link function and extra argument for the parameter.
- See \code{\link{Links}} for more choices and for general information.
+ \item{link}{
+ Link function for the parameter;
+ see \code{\link{Links}} for more choices and for general information.
}
\item{imethod}{
diff --git a/man/bortUC.Rd b/man/bortUC.Rd
index 2303540..6a3aab9 100644
--- a/man/bortUC.Rd
+++ b/man/bortUC.Rd
@@ -12,10 +12,10 @@
}
\usage{
-dbort(x, Qsize=1, a=0.5, log=FALSE)
-%pbort(q, Qsize=1, a=0.5)
-%qbort(p, Qsize=1, a=0.5)
-rbort(n, Qsize=1, a=0.5)
+dbort(x, Qsize = 1, a = 0.5, log = FALSE)
+%pbort(q, Qsize = 1, a = 0.5)
+%qbort(p, Qsize = 1, a = 0.5)
+rbort(n, Qsize = 1, a = 0.5)
}
\arguments{
\item{x}{vector of quantiles.}
diff --git a/man/brat.Rd b/man/brat.Rd
index f82fe84..fdf87d3 100644
--- a/man/brat.Rd
+++ b/man/brat.Rd
@@ -37,24 +37,24 @@ brat(refgp = "last", refvalue = 1, init.alpha = 1)
The Bradley Terry model involves \eqn{M+1} competitors
who either win or lose against each other (no draws/ties
allowed in this implementation--see \code{\link{bratt}}
- if there are ties). The probability that Competitor
+ if there are ties). The probability that Competitor
\eqn{i} beats Competitor \eqn{j} is \eqn{\alpha_i /
(\alpha_i+\alpha_j)}{alpha_i / (alpha_i + alpha_j)},
where all the \eqn{\alpha}{alpha}s are positive.
Loosely, the \eqn{\alpha}{alpha}s can be thought of as
- the competitors' `abilities'. For identifiability, one
+ the competitors' `abilities'. For identifiability, one
of the \eqn{\alpha_i}{alpha_i} is set to a known value
- \code{refvalue}, e.g., 1. By default, this function
+ \code{refvalue}, e.g., 1. By default, this function
chooses the last competitor to have this reference value.
The data can be represented in the form of a \eqn{M+1}
by \eqn{M+1} matrix of counts, where winners are the
- rows and losers are the columns. However, this is not
+ rows and losers are the columns. However, this is not
the way the data should be inputted (see below).
Excluding the reference value/group, this function
chooses \eqn{\log(\alpha_j)}{log(alpha_j)} as the
- \eqn{M} linear predictors. The log link ensures that
+ \eqn{M} linear predictors. The log link ensures that
the \eqn{\alpha}{alpha}s are positive.
@@ -95,12 +95,12 @@ than this function.
\note{
The function \code{\link{Brat}} is useful for coercing
a \eqn{M+1} by \eqn{M+1} matrix of counts into a one-row
- matrix suitable for \code{brat}. Diagonal elements are
+ matrix suitable for \code{brat}. Diagonal elements are
skipped, and the usual S order of \code{c(a.matrix)}
of elements is used. There should be no missing values
apart from the diagonal elements of the square matrix.
The matrix should have winners as the rows, and losers
- as the columns. In general, the response should be a
+ as the columns. In general, the response should be a
1-row matrix with \eqn{M(M+1)} columns.
@@ -117,7 +117,7 @@ than this function.
}
\section{Warning }{
Presently, the residuals are wrong, and the prior weights
- are not handled correctly. Ideally, the total number of
+ are not handled correctly. Ideally, the total number of
counts should be the prior weights, after the response has
been converted to proportions. This would make it similar
to family functions such as \code{\link{multinomial}}
@@ -136,20 +136,20 @@ than this function.
}
\examples{
# Citation statistics: being cited is a 'win'; citing is a 'loss'
-journal = c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-mat = matrix(c( NA, 33, 320, 284,
- 730, NA, 813, 276,
- 498, 68, NA, 325,
- 221, 17, 142, NA), 4,4)
-dimnames(mat) = list(winner = journal, loser = journal)
-fit = vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE)
-fit = vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE, crit = "coef")
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+mat <- matrix(c( NA, 33, 320, 284,
+ 730, NA, 813, 276,
+ 498, 68, NA, 325,
+ 221, 17, 142, NA), 4, 4)
+dimnames(mat) <- list(winner = journal, loser = journal)
+fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE)
+fit <- vglm(Brat(mat) ~ 1, brat(refgp = 1), trace = TRUE, crit = "coef")
summary(fit)
c(0, coef(fit)) # Log-abilities (in order of "journal")
c(1, Coef(fit)) # Abilities (in order of "journal")
fitted(fit) # Probabilities of winning in awkward form
-(check = InverseBrat(fitted(fit))) # Probabilities of winning
-check + t(check) # Should be 1's in the off-diagonals
+(check <- InverseBrat(fitted(fit))) # Probabilities of winning
+check + t(check) # Should be 1's in the off-diagonals
}
\keyword{models}
\keyword{regression}
diff --git a/man/bratt.Rd b/man/bratt.Rd
index 90b229f..79f559e 100644
--- a/man/bratt.Rd
+++ b/man/bratt.Rd
@@ -133,34 +133,34 @@ bratt(refgp = "last", refvalue = 1, init.alpha = 1, i0 = 0.01)
}
\examples{
# citation statistics: being cited is a 'win'; citing is a 'loss'
-journal = c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
-mat = matrix(c( NA, 33, 320, 284,
- 730, NA, 813, 276,
- 498, 68, NA, 325,
- 221, 17, 142, NA), 4,4)
-dimnames(mat) = list(winner = journal, loser = journal)
+journal <- c("Biometrika", "Comm.Statist", "JASA", "JRSS-B")
+mat <- matrix(c( NA, 33, 320, 284,
+ 730, NA, 813, 276,
+ 498, 68, NA, 325,
+ 221, 17, 142, NA), 4, 4)
+dimnames(mat) <- list(winner = journal, loser = journal)
# Add some ties. This is fictitious data.
ties = 5 + 0*mat
ties[2,1] = ties[1,2] = 9
# Now fit the model
-fit = vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE)
-fit = vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE, crit = "coef")
+fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE)
+fit <- vglm(Brat(mat, ties) ~ 1, bratt(refgp = 1), trace = TRUE, crit = "coef")
summary(fit)
c(0, coef(fit)) # Log-abilities (in order of "journal"); last is log(alpha0)
c(1, Coef(fit)) # Abilities (in order of "journal"); last is alpha0
-fit@misc$alpha # alpha_1,...,alpha_M
+fit@misc$alpha # alpha_1,...,alpha_M
fit@misc$alpha0 # alpha_0
-fitted(fit) # Probabilities of winning and tying, in awkward form
+fitted(fit) # Probabilities of winning and tying, in awkward form
predict(fit)
-(check = InverseBrat(fitted(fit))) # Probabilities of winning
-qprob = attr(fitted(fit), "probtie") # Probabilities of a tie
-qprobmat = InverseBrat(c(qprob), NCo=nrow(ties)) # Probabilities of a tie
-check + t(check) + qprobmat # Should be 1's in the off-diagonals
+(check <- InverseBrat(fitted(fit))) # Probabilities of winning
+qprob <- attr(fitted(fit), "probtie") # Probabilities of a tie
+qprobmat <- InverseBrat(c(qprob), NCo=nrow(ties)) # Probabilities of a tie
+check + t(check) + qprobmat # Should be 1's in the off-diagonals
}
\keyword{models}
\keyword{regression}
diff --git a/man/cardUC.Rd b/man/cardUC.Rd
index 5f3c6d2..9d2f938 100644
--- a/man/cardUC.Rd
+++ b/man/cardUC.Rd
@@ -12,7 +12,7 @@
}
\usage{
-dcard(x, mu, rho, log=FALSE)
+dcard(x, mu, rho, log = FALSE)
pcard(q, mu, rho)
qcard(p, mu, rho, tolerance = 1e-07, maxits = 500)
rcard(n, mu, rho, ...)
diff --git a/man/cardioid.Rd b/man/cardioid.Rd
index aeb05af..61193cd 100644
--- a/man/cardioid.Rd
+++ b/man/cardioid.Rd
@@ -7,9 +7,8 @@
cardioid distribution by maximum likelihood estimation.
}
\usage{
-cardioid(lmu = "elogit", lrho = "elogit",
- emu = if(lmu == "elogit") list(min = 0, max = 2*pi) else list(),
- erho = if(lmu == "elogit") list(min = -0.5, max = 0.5) else list(),
+cardioid(lmu = elogit(min = 0, max = 2*pi),
+ lrho = elogit(min = -0.5, max = 0.5),
imu = NULL, irho = 0.3, nsimEIM = 100, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,11 +19,6 @@ cardioid(lmu = "elogit", lrho = "elogit",
See \code{\link{Links}} for more choices.
}
- \item{emu, erho}{
- List. Extra argument for each of the link functions.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{imu, irho}{
Initial values.
A \code{NULL} means an initial value is chosen internally.
@@ -65,6 +59,7 @@ cardioid(lmu = "elogit", lrho = "elogit",
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
@@ -84,6 +79,7 @@ Singapore: World Scientific.
The user is therefore encouraged to try different starting values,
i.e., make use of \code{imu} and \code{irho}.
+
}
\seealso{
@@ -91,13 +87,15 @@ Singapore: World Scientific.
\code{\link{elogit}},
\code{\link{vonmises}}.
+
\pkg{CircStats} and \pkg{circular} currently have a lot more
R functions for circular data than the \pkg{VGAM} package.
+
}
\examples{
-cdata = data.frame(y = rcard(n = 1000, mu = 4, rho = 0.45))
-fit = vglm(y ~ 1, cardioid, cdata, trace=TRUE)
+cdata <- data.frame(y = rcard(n = 1000, mu = 4, rho = 0.45))
+fit <- vglm(y ~ 1, cardioid, cdata, trace = TRUE)
coef(fit, matrix=TRUE)
Coef(fit)
c(with(cdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/cauchit.Rd b/man/cauchit.Rd
index bf0aa44..1169e68 100644
--- a/man/cauchit.Rd
+++ b/man/cauchit.Rd
@@ -8,46 +8,26 @@
}
\usage{
-cauchit(theta, earg = list(bvalue= .Machine$double.eps),
+cauchit(theta, bvalue = .Machine$double.eps,
inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta}{
Numeric or character.
- See below for further details.
+ See below for further details.
}
- \item{earg}{
- List. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
- replaced by 1 minus the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}}.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a \code{\link{vglmff-class}}
- object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
This link function is an alternative link function for parameters that
@@ -58,8 +38,8 @@ cauchit(theta, earg = list(bvalue= .Machine$double.eps),
(see examples below).
Numerical values of \code{theta} close to 0 or 1 or out of range result
- in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The arguments
- \code{short} and \code{tag} are used only if \code{theta} is character.
+ in \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+
}
\value{
@@ -82,7 +62,8 @@ cauchit(theta, earg = list(bvalue= .Machine$double.eps),
\note{
Numerical instability may occur when \code{theta} is close to 1 or 0.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use \code{bvalue}.
+
As mentioned above,
in terms of the threshold approach with cumulative probabilities for
@@ -98,27 +79,29 @@ cauchit(theta, earg = list(bvalue= .Machine$double.eps),
\code{\link{loge}},
\code{\link{cauchy}},
\code{\link{cauchy1}}.
+
+
}
\examples{
-p = seq(0.01, 0.99, by=0.01)
+p <- seq(0.01, 0.99, by=0.01)
cauchit(p)
max(abs(cauchit(cauchit(p), inverse = TRUE) - p)) # Should be 0
-p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
+p <- c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by = 0.01))
cauchit(p) # Has no NAs
\dontrun{
par(mfrow = c(2, 2), lwd = (mylwd <- 2))
-y = seq(-4, 4, length = 100)
-p = seq(0.01, 0.99, by = 0.01)
+y <- seq(-4, 4, length = 100)
+p <- seq(0.01, 0.99, by = 0.01)
for(d in 0:1) {
matplot(p, cbind(logit(p, deriv = d), probit(p, deriv = d)),
type = "n", col = "purple", ylab = "transformation",
- las=1, main = if (d == 0) "Some probability link functions"
+ las = 1, main = if (d == 0) "Some probability link functions"
else "First derivative")
- lines(p, logit(p, deriv = d), col = "limegreen")
- lines(p, probit(p, deriv = d), col = "purple")
+ lines(p, logit(p, deriv = d), col = "limegreen")
+ lines(p, probit(p, deriv = d), col = "purple")
lines(p, cloglog(p, deriv = d), col = "chocolate")
lines(p, cauchit(p, deriv = d), col = "tan")
if (d == 0) {
@@ -142,7 +125,7 @@ for(d in 0) {
if (d == 0) {
abline(h = 0.5, v = 0, lty = "dashed")
legend(-4, 1, c("logit", "probit", "cloglog", "cauchit"), lwd = mylwd,
- col = c("limegreen","purple","chocolate", "tan"))
+ col = c("limegreen", "purple", "chocolate", "tan"))
}
}
par(lwd = 1)
diff --git a/man/cauchy.Rd b/man/cauchy.Rd
index 6b3b20c..fe99e07 100644
--- a/man/cauchy.Rd
+++ b/man/cauchy.Rd
@@ -9,12 +9,12 @@
}
\usage{
-cauchy(llocation="identity", lscale="loge", elocation=list(),
- escale=list(), ilocation=NULL, iscale=NULL,
- iprobs = seq(0.2, 0.8, by=0.2),
- imethod=1, nsimEIM=NULL, zero=2)
-cauchy1(scale.arg=1, llocation="identity",
- elocation=list(), ilocation=NULL, imethod=1)
+cauchy(llocation = "identity", lscale = "loge",
+ ilocation = NULL, iscale = NULL,
+ iprobs = seq(0.2, 0.8, by = 0.2),
+ imethod = 1, nsimEIM = NULL, zero = 2)
+cauchy1(scale.arg = 1, llocation = "identity",
+ ilocation = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,11 +24,6 @@ cauchy1(scale.arg=1, llocation="identity",
See \code{\link{Links}} for more choices.
}
- \item{elocation, escale}{
- List. Extra argument for each link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation, iscale}{
Optional initial value for \eqn{a}{a} and \eqn{b}{b}.
By default, an initial value is chosen internally for each.
@@ -92,6 +87,7 @@ cauchy1(scale.arg=1, llocation="identity",
make full use of \code{imethod}, \code{ilocation}, \code{iscale}
etc.
+
}
\references{
@@ -139,23 +135,24 @@ Observed versus expected Fisher information.
\code{\link{cauchit}},
\code{\link{studentt}}.
+
}
\examples{
# Both location and scale parameters unknown
cdata1 <- data.frame(x = runif(nn <- 1000))
cdata1 <- transform(cdata1, loc = exp(1+0.5*x), scale = exp(1))
cdata1 <- transform(cdata1, y = rcauchy(nn, loc, scale))
-fit <- vglm(y ~ x, cauchy(lloc="loge"), cdata1, trace = TRUE)
+fit <- vglm(y ~ x, cauchy(lloc = "loge"), cdata1, trace = TRUE)
coef(fit, matrix = TRUE)
-head(fitted(fit)) # Location estimates
+head(fitted(fit)) # Location estimates
summary(fit)
# Location parameter unknown
set.seed(123)
cdata2 <- data.frame(x = runif(nn <- 500))
-cdata2 <- transform(cdata2, loc = 1+0.5*x, scale = 0.4)
+cdata2 <- transform(cdata2, loc = 1 + 0.5 * x, scale = 0.4)
cdata2 <- transform(cdata2, y = rcauchy(nn, loc, scale))
-fit <- vglm(y ~ x, cauchy1(scale = 0.4), cdata2, trace = TRUE, crit = "c")
+fit <- vglm(y ~ x, cauchy1(scale = 0.4), cdata2, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/ccoef.Rd b/man/ccoef.Rd
index b46ebea..4b0d168 100644
--- a/man/ccoef.Rd
+++ b/man/ccoef.Rd
@@ -15,9 +15,11 @@ ccoef(object, ...)
\arguments{
\item{object}{ An object for which the extraction of canonical
coefficients is meaningful.
+
}
\item{\dots}{ Other arguments fed into the specific
methods function of the model.
+
}
}
\details{
@@ -26,12 +28,15 @@ ccoef(object, ...)
the latent variables. They are highly interpretable in ecology,
and are looked at as weights or loadings.
+
They are also applicable for reduced-rank VGLMs.
+
}
\value{
The value returned depends specifically on the methods function invoked.
+
}
\references{
Yee, T. W. and Hastie, T. J. (2003)
@@ -39,16 +44,19 @@ Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
@@ -67,6 +75,7 @@ Constrained additive ordination.
fitting quadratic ordination models is whether \code{EqualTolerances}
is \code{TRUE} or \code{FALSE}. See Yee (2004) for details.
+
}
\seealso{
@@ -75,16 +84,16 @@ Constrained additive ordination.
\code{ccoef.cao},
\code{\link[stats]{coef}}.
+
}
\examples{
-\dontrun{
-set.seed(111) # This leads to the global solution
+\dontrun{ set.seed(111) # This leads to the global solution
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = quasipoissonff, data = hspider, Crow1positive=FALSE)
+ fam = quasipoissonff, data = hspider, Crow1positive = FALSE)
ccoef(p1)
}
}
diff --git a/man/cdf.lmscreg.Rd b/man/cdf.lmscreg.Rd
index 7c6dd7b..63e9a07 100644
--- a/man/cdf.lmscreg.Rd
+++ b/man/cdf.lmscreg.Rd
@@ -15,12 +15,18 @@ cdf.lmscreg(object, newdata = NULL, ...)
an object produced by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}} with a family function beginning with
\code{"lms."}.
+
+
}
\item{newdata}{ Data frame where the predictions are
to be made. If missing, the original data is used.
+
+
}
\item{\dots}{ Parameters which are passed into functions such as
\code{cdf.lms.yjn}.
+
+
}
}
\details{
@@ -28,20 +34,27 @@ cdf.lmscreg(object, newdata = NULL, ...)
probabilities associated with the quantiles \code{newdata}.
For example, a value near 0.75 means it is close to the upper quartile
of the distribution.
+
+
}
\value{
A vector of CDF values lying in [0,1].
+
+
}
\references{
+
Yee, T. W. (2004)
Quantile regression via vector generalized additive models.
\emph{Statistics in Medicine}, \bold{23}, 2295--2315.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
@@ -49,9 +62,11 @@ contains further information and examples.
are returned. The opposite is performed by
\code{\link{qtplot.lmscreg}}.
+
The CDF values of the model have been placed in
\code{@post$cdf} when the model was fitted.
+
}
\seealso{
@@ -60,15 +75,17 @@ The CDF values of the model have been placed in
\code{\link{lms.bcn}},
\code{\link{lms.bcg}},
\code{\link{lms.yjn}}.
+
+
}
\examples{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
+fit <- vgam(BMI ~ s(age, df=c(4, 2)), lms.bcn(zero = 1), data = bmi.nz)
head(fit@post$cdf)
-head(cdf(fit)) # Same
-head(fit@y)
+head(cdf(fit)) # Same
+head(depvar(fit))
head(fitted(fit))
-cdf(fit, data.frame(age=c(31.5,39), BMI=c(28.4,24)))
+cdf(fit, data.frame(age = c(31.5, 39), BMI = c(28.4, 24)))
}
\keyword{models}
\keyword{regression}
diff --git a/man/cennormal1.Rd b/man/cennormal1.Rd
index e592b28..153f3e0 100644
--- a/man/cennormal1.Rd
+++ b/man/cennormal1.Rd
@@ -8,13 +8,12 @@
}
\usage{
-cennormal1(lmu = "identity", lsd = "loge",
- emu = list(), esd = list(), imethod = 1, zero = 2)
+cennormal1(lmu = "identity", lsd = "loge", imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lmu, lsd, emu, esd}{
- Parameter link functions and its extra arguments,
+ \item{lmu, lsd}{
+ Parameter link functions
applied to the mean and standard deviation parameters.
See \code{\link{Links}} for more choices.
The standard deviation is a positive quantity, therefore a log link
@@ -78,23 +77,25 @@ cennormal1(lmu = "identity", lsd = "loge",
}
\examples{
-cdata = data.frame(x2 = runif(nn <- 1000)) # ystar are true values
-cdata = transform(cdata, ystar = rnorm(nn, m = 100 + 15 * x2, sd = exp(3)))
-\dontrun{with(cdata, hist(ystar))}
-cdata = transform(cdata, L = runif(nn, 80, 90), # Lower censoring points
- U = runif(nn, 130, 140)) # Upper censoring points
-cdata = transform(cdata, y = pmax(L, ystar)) # Left censored
-cdata = transform(cdata, y = pmin(U, y)) # Right censored
-\dontrun{with(cdata, hist(y))}
-Extra = list(leftcensored = with(cdata, ystar < L),
- rightcensored = with(cdata, ystar > U))
-fit1 = vglm(y ~ x2, cennormal1, cdata, crit = "c", extra = Extra, trace = TRUE)
-fit2 = vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
+\dontrun{
+cdata <- data.frame(x2 = runif(nn <- 1000)) # ystar are true values
+cdata <- transform(cdata, ystar = rnorm(nn, m = 100 + 15 * x2, sd = exp(3)))
+with(cdata, hist(ystar))
+cdata <- transform(cdata, L = runif(nn, 80, 90), # Lower censoring points
+ U = runif(nn, 130, 140)) # Upper censoring points
+cdata <- transform(cdata, y = pmax(L, ystar)) # Left censored
+cdata <- transform(cdata, y = pmin(U, y)) # Right censored
+with(cdata, hist(y))
+Extra <- list(leftcensored = with(cdata, ystar < L),
+ rightcensored = with(cdata, ystar > U))
+fit1 <- vglm(y ~ x2, cennormal1, cdata, crit = "c", extra = Extra, trace = TRUE)
+fit2 <- vglm(y ~ x2, tobit(Lower = with(cdata, L), Upper = with(cdata, U)),
cdata, crit = "c", trace = TRUE)
coef(fit1, matrix = TRUE)
max(abs(coef(fit1, matrix = TRUE) - coef(fit2, matrix = TRUE))) # Should be 0
names(fit1@extra)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/cenpoisson.Rd b/man/cenpoisson.Rd
index 59c9e32..f945d0c 100644
--- a/man/cenpoisson.Rd
+++ b/man/cenpoisson.Rd
@@ -9,18 +9,18 @@
}
\usage{
-cenpoisson(link = "loge", earg = list(), imu = NULL)
+cenpoisson(link = "loge", imu = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and its extra argument applied to the mean.
- See \code{\link{Links}} for more choices.
+ \item{link}{
+ Link function applied to the mean;
+ see \code{\link{Links}} for more choices.
}
\item{imu}{
- Optional initial value.
- See \code{\link{CommonVGAMffArguments}} for more information.
+ Optional initial value;
+ see \code{\link{CommonVGAMffArguments}} for more information.
}
}
diff --git a/man/cgo.Rd b/man/cgo.Rd
index c29fdc0..fc564b6 100644
--- a/man/cgo.Rd
+++ b/man/cgo.Rd
@@ -18,9 +18,13 @@ because CGO (for \emph{canonical Gaussian ordination}) is a confusing
and inaccurate name.
CQO (for \emph{constrained quadratic ordination}) is better.
This new nomenclature described in Yee (2006).
+
+
}
\value{
Nothing is returned; an error message is issued.
+
+
}
\references{
Yee, T. W. (2004)
@@ -45,6 +49,7 @@ The code, therefore, in Yee (2004) will not run without changing the
\seealso{
\code{\link{cqo}}.
+
}
\examples{
diff --git a/man/cgumbel.Rd b/man/cgumbel.Rd
index 6fc868a..7aa49f0 100644
--- a/man/cgumbel.Rd
+++ b/man/cgumbel.Rd
@@ -9,8 +9,8 @@
}
\usage{
-cgumbel(llocation = "identity", lscale = "loge", elocation = list(),
- escale = list(), iscale=NULL, mean=TRUE, percentiles=NULL, zero=2)
+cgumbel(llocation = "identity", lscale = "loge",
+ iscale = NULL, mean = TRUE, percentiles = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,11 +21,6 @@ cgumbel(llocation = "identity", lscale = "loge", elocation = list(),
See \code{\link{Links}} for more choices.
}
- \item{elocation, escale}{
- Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale}{
Numeric and positive.
Initial value for \eqn{scale}. Recycled to the appropriate length.
diff --git a/man/chinese.nz.Rd b/man/chinese.nz.Rd
index c86a2a4..06f9b23 100644
--- a/man/chinese.nz.Rd
+++ b/man/chinese.nz.Rd
@@ -22,26 +22,53 @@
The second value of 4583 looks erroneous, as seen by the plot below.
+ Historically, there was a large exodus of Chinese from the Guangdong
+ region starting in the mid-1800s to the gold fields of
+ South Island of New Zealand,
+ California,
+ and Southern Australia, etc.
+ Racial discrimination then meant that only men were allowed
+ entry, to hinder permanent settlement.
+ In the case of New Zealand, the government relaxed its
+ immigration laws after WWII to allow wives of Chinese already in NZ to join them
+ because China had been among the Allied powers.
+ Gradual relaxation in the immigration and an influx during the 1980s
+ meant the Chinese population became increasingly demographically
+ normal over time.
+
+
}
%\source{
%}
\references{
- Page 6 of \emph{Aliens At My Table: Asians as New Zealanders see them}
+ Page 6 of \emph{Aliens At My Table: Asians as New Zealanders See Them}
by M. Ip and N. Murphy,
(2005), Penguin.
}
\examples{
-\dontrun{ plot(female/(male+female) ~ year, chinese.nz, type = "b",
+\dontrun{ par(mfrow = c(1, 2))
+plot(female/(male+female) ~ year, chinese.nz, type = "b",
ylab = "Proportion", col = "blue", las = 1,
main = "Proportion of NZ Chinese that are female")
-abline(h = 0.5, lty = "dashed")
+abline(h = 0.5, lty = "dashed", col = "gray")
+
+fit1.cnz = vglm(cbind(female, male) ~ year, binomialff, chinese.nz)
+fit2.cnz = vglm(cbind(female, male) ~ poly(year, 2), binomialff, chinese.nz)
+fit4.cnz = vglm(cbind(female, male) ~ bs(year, 4), binomialff, chinese.nz)
+
+lines(fitted(fit1.cnz) ~ year, chinese.nz, col = "purple")
+lines(fitted(fit2.cnz) ~ year, chinese.nz, col = "green")
+lines(fitted(fit4.cnz) ~ year, chinese.nz, col = "orange")
+legend("bottomright", col = c("purple", "green", "orange"),
+ lty = 1, leg = c("linear", "quadratic", "B-spline"))
+
plot(100*(male+female)/nz ~ year, chinese.nz, type = "b", ylab = "Percent",
ylim = c(0, max(100*(male+female)/nz)), col = "blue", las = 1,
main = "Percent of NZers that are Chinese")
-abline(h = 0, lty = "dashed") }
+abline(h = 0, lty = "dashed", col = "gray") }
}
\keyword{datasets}
diff --git a/man/chisq.Rd b/man/chisq.Rd
index 25fd010..3d43f2e 100644
--- a/man/chisq.Rd
+++ b/man/chisq.Rd
@@ -8,11 +8,11 @@
}
\usage{
-chisq(link = "loge", earg = list())
+chisq(link = "loge", zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
+ \item{link, zero}{
See \code{\link{CommonVGAMffArguments}} for information.
}
@@ -23,6 +23,7 @@ chisq(link = "loge", earg = list())
Being positive, a log link is used by default.
Fisher scoring is used.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -41,7 +42,7 @@ New York: Wiley-Interscience, Third edition.
\author{ T. W. Yee }
\note{
- Matrix responses are permitted.
+ Multiple responses are permitted.
There may be convergence problems if the degrees of freedom
is very large or close to zero.
@@ -51,6 +52,7 @@ New York: Wiley-Interscience, Third edition.
\code{\link[stats]{Chisquare}}.
\code{\link{normal1}}.
+
}
\examples{
cdata <- data.frame(x2 = runif(nn <- 1000))
diff --git a/man/cloglog.Rd b/man/cloglog.Rd
index c068323..6cbef03 100644
--- a/man/cloglog.Rd
+++ b/man/cloglog.Rd
@@ -9,7 +9,7 @@
}
\usage{
-cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
+cloglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,35 +19,13 @@ cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
- replaced by 1 minus the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}} for general information about links.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
-
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
-
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
}
@@ -55,8 +33,8 @@ cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
The complementary log-log link function is commonly used for parameters
that lie in the unit interval. Numerical values of \code{theta}
close to 0 or 1 or out of range result in \code{Inf}, \code{-Inf},
- \code{NA} or \code{NaN}. The arguments \code{short} and \code{tag}
- are used only if \code{theta} is character.
+ \code{NA} or \code{NaN}.
+
}
\value{
@@ -81,22 +59,26 @@ cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
\note{
Numerical instability may occur when \code{theta} is close to 1 or 0.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use \code{bvalue}.
+
Changing 1s to 0s and 0s to 1s in the response means that effectively
a loglog link is fitted. That is, transform \eqn{y} by \eqn{1-y}.
That's why only one of \code{\link{cloglog}}
and \code{loglog} is written.
+
With constrained ordination (e.g., \code{\link{cqo}} and
\code{\link{cao}}) used with \code{\link{binomialff}}, a complementary
log-log link function is preferred over the default \code{\link{logit}}
link, for a good reason. See the example below.
+
In terms of the threshold approach with cumulative probabilities for
an ordinal response this link function corresponds to the extreme
value distribution.
+
}
\seealso{
@@ -107,35 +89,35 @@ cloglog(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\examples{
-p = seq(0.01, 0.99, by=0.01)
+p <- seq(0.01, 0.99, by = 0.01)
cloglog(p)
-max(abs(cloglog(cloglog(p), inverse=TRUE) - p)) # Should be 0
+max(abs(cloglog(cloglog(p), inverse = TRUE) - p)) # Should be 0
-p = c(seq(-0.02, 0.02, by=0.01), seq(0.97, 1.02, by=0.01))
-cloglog(p) # Has NAs
-cloglog(p, earg=list(bvalue= .Machine$double.eps)) # Has no NAs
+p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
+cloglog(p) # Has NAs
+cloglog(p, bvalue = .Machine$double.eps) # Has no NAs
\dontrun{
-p = seq(0.01, 0.99, by=0.01)
-plot(p, logit(p), type="l", col="limegreen", ylab="transformation",
- lwd=2, las=1, main="Some probability link functions")
-lines(p, probit(p), col="purple", lwd=2)
-lines(p, cloglog(p), col="chocolate", lwd=2)
-lines(p, cauchit(p), col="tan", lwd=2)
-abline(v=0.5, h=0, lty="dashed")
+p <- seq(0.01, 0.99, by = 0.01)
+plot(p, logit(p), type = "l", col = "limegreen", ylab = "transformation",
+ lwd = 2, las = 1, main = "Some probability link functions")
+lines(p, probit(p), col = "purple", lwd = 2)
+lines(p, cloglog(p), col = "chocolate", lwd = 2)
+lines(p, cauchit(p), col = "tan", lwd = 2)
+abline(v = 0.5, h = 0, lty = "dashed")
legend(0.1, 4, c("logit", "probit", "cloglog", "cauchit"),
- col=c("limegreen","purple","chocolate", "tan"), lwd=2)
+ col=c("limegreen","purple","chocolate", "tan"), lwd = 2)
}
\dontrun{
# This example shows that a cloglog link is preferred over the logit
n = 500; p = 5; S = 3; Rank = 1 # Species packing model:
-mydata = rcqo(n, p, S, EqualTol=TRUE, ESOpt=TRUE, EqualMax=TRUE,
- family="binomial", hiabundance=5, seed=123, Rank=Rank)
-fitc = cqo(attr(mydata, "formula"), ITol=TRUE, data=mydata,
- fam=binomialff(mv=TRUE, link="cloglog"), Rank=Rank)
-fitl = cqo(attr(mydata, "formula"), ITol=TRUE, data=mydata,
- fam=binomialff(mv=TRUE, link="logit"), Rank=Rank)
+mydata = rcqo(n, p, S, EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE,
+ family = "binomial", hiabundance=5, seed = 123, Rank = Rank)
+fitc = cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+ fam = binomialff(mv = TRUE, link = "cloglog"), Rank = Rank)
+fitl = cqo(attr(mydata, "formula"), ITol = TRUE, data = mydata,
+ fam = binomialff(mv = TRUE, link = "logit"), Rank = Rank)
# Compare the fitted models (cols 1 and 3) with the truth (col 2)
cbind(ccoef(fitc), attr(mydata, "ccoefficients"), ccoef(fitl))
diff --git a/man/constraints.Rd b/man/constraints.Rd
index f1f4da6..f8ec53b 100644
--- a/man/constraints.Rd
+++ b/man/constraints.Rd
@@ -1,5 +1,6 @@
\name{constraints}
\alias{constraints}
+\alias{constraints.vlm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Constraint Matrices }
\description{
@@ -9,6 +10,7 @@
}
\usage{
constraints(object, ...)
+constraints.vlm(object, type = c("lm", "term"), all = TRUE, which, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,10 +18,26 @@ constraints(object, ...)
Some \pkg{VGAM} object, for example, having
class \code{\link{vglmff-class}}.
+
+ }
+ \item{type}{
+ Character. Whether LM- or term-type constraints are to be returned.
+ The number of such matrices returned is equal to
+ \code{nvar(object, type = "lm")} and
+ the number of terms, respectively.
+
+
+ }
+ \item{all, which}{
+ If \code{all = FALSE} then \code{which} gives the integer index or a
+ vector of logicals specifying the selection.
+
+
}
\item{\dots}{
Other possible arguments such as \code{type}.
+
}
}
@@ -40,7 +58,9 @@ constraints(object, ...)
}
\value{
- This extractor function returns a list comprising of
+ The extractor function
+ \code{constraints()}
+ returns a list comprising of
constraint matrices---usually one for each column of the
VLM model matrix, and in that order.
The list is labelled with the variable names.
@@ -53,9 +73,11 @@ constraints(object, ...)
For \code{\link{vglm}} and \code{\link{vgam}} objects,
- feeding in the \code{"lm"}-type constraint matrices back
+ feeding in \code{type = "term"} constraint matrices back
into the same model should work and give an identical model.
- The default are the \code{"vlm"}-type constraint matrices.
+ The default are the \code{"lm"}-type constraint matrices;
+ this is a list with one constraint matrix per column of
+ the LM matrix.
See the \code{constraints} argument of \code{\link{vglm}},
and the example below.
@@ -106,6 +128,8 @@ information.
\seealso{
+ \code{\link{is.parallel}},
+ \code{\link{is.zero}}.
VGLMs are described in \code{\link{vglm-class}};
RR-VGLMs are described in \code{\link{rrvglm-class}}.
@@ -124,13 +148,14 @@ pneumo <- transform(pneumo, let = log(exposure.time))
(fit1 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
cumulative(parallel = TRUE, reverse = TRUE), pneumo))
coef(fit1, matrix = TRUE)
-constraints(fit1) # Parallel assumption results in this
-constraints(fit1, type = "vlm") # This is the same as the default ("vlm"-type)
+constraints(fit1) # Parallel assumption results in this
+constraints(fit1, type = "term") # This is the same as the default ("vlm"-type)
+is.parallel(fit1)
-# An equivalent model to fit1 (needs the type "lm" constraints):
-clist.lm <- constraints(fit1, type = "lm") # The "lm"-type constraints
+# An equivalent model to fit1 (needs the type "term" constraints):
+clist.term <- constraints(fit1, type = "term") # The "term"-type constraints
(fit2 <- vglm(cbind(normal, mild, severe) ~ bs(let, 3),
- cumulative(reverse = TRUE), pneumo, constraints = clist.lm))
+ cumulative(reverse = TRUE), pneumo, constraints = clist.term))
abs(max(coef(fit1, matrix = TRUE) -
coef(fit2, matrix = TRUE))) # Should be zero
diff --git a/man/cqo.Rd b/man/cqo.Rd
index fe168ba..291b509 100644
--- a/man/cqo.Rd
+++ b/man/cqo.Rd
@@ -440,125 +440,121 @@ contains further information and examples.
}
\examples{
+\dontrun{
# Example 1; Fit an unequal tolerances model to the hunting spiders data
-hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental variables
set.seed(1234) # For reproducibility of the results
-p1ut = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = poissonff, data = hspider, Crow1positive = FALSE,
- EqualTol = FALSE)
+p1ut <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ fam = poissonff, data = hspider, Crow1positive = FALSE,
+ EqualTol = FALSE)
sort(p1ut@misc$deviance.Bestof) # A history of all the iterations
if(deviance(p1ut) > 1177) warning("suboptimal fit obtained")
-\dontrun{
-S = ncol(p1ut@y) # Number of species
-clr = (1:(S+1))[-7] # Omits yellow
+S <- ncol(depvar(p1ut)) # Number of species
+clr <- (1:(S+1))[-7] # Omits yellow
lvplot(p1ut, y = TRUE, lcol = clr, pch = 1:S, pcol = clr, las = 1) # ordination diagram
-legend("topright", leg = colnames(p1ut@y), col = clr,
- pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2) }
-(cp = Coef(p1ut))
+legend("topright", leg = colnames(depvar(p1ut)), col = clr,
+ pch = 1:S, merge = TRUE, bty = "n", lty = 1:S, lwd = 2)
+(cp <- Coef(p1ut))
-(a = cp@lv[cp@lvOrder]) # The ordered site scores along the gradient
+(a <- cp@lv[cp@lvOrder]) # The ordered site scores along the gradient
# Names of the ordered sites along the gradient:
rownames(cp@lv)[cp@lvOrder]
-(a = (cp@Optimum)[,cp@OptimumOrder]) # The ordered optima along the gradient
-a = a[!is.na(a)] # Delete the species that is not unimodal
-names(a) # Names of the ordered optima along the gradient
+(aa <- (cp@Optimum)[,cp@OptimumOrder]) # The ordered optima along the gradient
+aa <- aa[!is.na(aa)] # Delete the species that is not unimodal
+names(aa) # Names of the ordered optima along the gradient
-\dontrun{
trplot(p1ut, whichSpecies = 1:3, log = "xy", type = "b", lty = 1, lwd = 2,
col = c("blue","red","green"), label = TRUE) -> ii # trajectory plot
-legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep = " and "),
- lwd = 2, lty = 1, col = c("blue","red","green"))
+legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "),
+ lwd = 2, lty = 1, col = c("blue", "red", "green"))
abline(a = 0, b = 1, lty = "dashed")
-S = ncol(p1ut@y) # Number of species
-clr = (1:(S+1))[-7] # Omits yellow
+S <- ncol(depvar(p1ut)) # Number of species
+clr <- (1:(S+1))[-7] # Omits yellow
persp(p1ut, col = clr, label = TRUE, las = 1) # perspective plot
-}
# Example 2; Fit an equal tolerances model. Less numerically fraught.
set.seed(1234)
-p1et = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = poissonff, data = hspider, Crow1positive = FALSE)
+p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, data = hspider, Crow1positive = FALSE)
sort(p1et@misc$deviance.Bestof) # A history of all the iterations
-if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
-\dontrun{
-S = ncol(p1et@y) # Number of species
-clr = (1:(S+1))[-7] # Omits yellow
-persp(p1et, col = clr, label = TRUE, las = 1) }
+if (deviance(p1et) > 1586) warning("suboptimal fit obtained")
+S <- ncol(depvar(p1et)) # Number of species
+clr <- (1:(S+1))[-7] # Omits yellow
+persp(p1et, col = clr, label = TRUE, las = 1)
# Example 3: A rank-2 equal tolerances CQO model with Poisson data
# This example is numerically fraught... need IToler = TRUE too.
set.seed(555)
-p2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = poissonff, data = hspider, Crow1positive = FALSE,
- IToler = TRUE, Rank = 2, Bestof = 3, isdlv = c(2.1, 0.9))
+p2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, data = hspider, Crow1positive = FALSE,
+ IToler = TRUE, Rank = 2, Bestof = 3, isdlv = c(2.1, 0.9))
sort(p2@misc$deviance.Bestof) # A history of all the iterations
if(deviance(p2) > 1127) warning("suboptimal fit obtained")
-\dontrun{
lvplot(p2, ellips = FALSE, label = TRUE, xlim = c(-3,4),
C = TRUE, Ccol = "brown", sites = TRUE, scol = "grey",
- pcol = "blue", pch = "+", chull = TRUE, ccol = "grey") }
+ pcol = "blue", pch = "+", chull = TRUE, ccol = "grey")
# Example 4: species packing model with presence/absence data
set.seed(2345)
-n = 200; p = 5; S = 5
-mydata = rcqo(n, p, S, fam = "binomial", hiabundance = 4,
- EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE)
-myform = attr(mydata, "formula")
+n <- 200; p <- 5; S <- 5
+mydata <- rcqo(n, p, S, fam = "binomial", hiabundance = 4,
+ EqualTol = TRUE, ESOpt = TRUE, EqualMax = TRUE)
+myform <- attr(mydata, "formula")
set.seed(1234)
-b1et = cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
+b1et <- cqo(myform, binomialff(mv = TRUE, link = "cloglog"), data = mydata)
sort(b1et@misc$deviance.Bestof) # A history of all the iterations
-\dontrun{ lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1) }
+lvplot(b1et, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S, las = 1)
Coef(b1et)
# Compare the fitted model with the 'truth'
-cbind(truth=attr(mydata, "ccoefficients"), fitted = ccoef(b1et))
+cbind(truth = attr(mydata, "ccoefficients"), fitted = ccoef(b1et))
# Example 5: Plot the deviance residuals for diagnostic purposes
set.seed(1234)
-p1et = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = poissonff, data = hspider, EqualTol = TRUE, trace = FALSE)
+p1et <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, data = hspider, EqualTol = TRUE, trace = FALSE)
sort(p1et@misc$deviance.Bestof) # A history of all the iterations
if(deviance(p1et) > 1586) warning("suboptimal fit obtained")
-S = ncol(p1et@y)
-par(mfrow = c(3,4))
+S <- ncol(depvar(p1et))
+par(mfrow = c(3, 4))
for(ii in 1:S) {
- tempdata = data.frame(lv1 = c(lv(p1et)), sppCounts = p1et@y[,ii])
- tempdata = transform(tempdata, myOffset = -0.5 * lv1^2)
+ tempdata <- data.frame(lv1 = c(lv(p1et)), sppCounts = depvar(p1et)[, ii])
+ tempdata <- transform(tempdata, myOffset = -0.5 * lv1^2)
# For species ii, refit the model to get the deviance residuals
- fit1 = vglm(sppCounts ~ offset(myOffset) + lv1, fam = poissonff,
- data = tempdata, trace = FALSE)
+ fit1 <- vglm(sppCounts ~ offset(myOffset) + lv1, fam = poissonff,
+ data = tempdata, trace = FALSE)
# For checking: this should be 0
- print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
- print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
-
-# # Plot the deviance residuals
- devresid = resid(fit1, type = "deviance")
- predvalues = predict(fit1) + fit1@offset
- ooo = with(tempdata, order(lv1))
-\dontrun{
- with(tempdata, plot(lv1, predvalues + devresid, col = "darkgreen",
- xlab = "lv1", ylab = "", main = colnames(p1et@y)[ii]))
- with(tempdata, lines(lv1[ooo], predvalues[ooo], col = "blue")) }
+ print("max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1)))")
+ print( max(abs(c(Coef(p1et)@B1[1,ii], Coef(p1et)@A[ii,1]) - coef(fit1))) )
+
+# Plot the deviance residuals
+ devresid <- resid(fit1, type = "deviance")
+ predvalues <- predict(fit1) + fit1@offset
+ ooo <- with(tempdata, order(lv1))
+ with(tempdata, plot(lv1, predvalues + devresid, col = "darkgreen",
+ xlab = "lv1", ylab = "", main = colnames(depvar(p1et))[ii]))
+ with(tempdata, lines(lv1[ooo], predvalues[ooo], col = "blue"))
+}
}
}
\keyword{models}
diff --git a/man/crashes.Rd b/man/crashes.Rd
index 043cb98..b3594fa 100644
--- a/man/crashes.Rd
+++ b/man/crashes.Rd
@@ -16,7 +16,7 @@
bicycles and pedestrians. There are some alcohol-related
data too.
- }
+}
\usage{
data(crashi)
data(crashf)
@@ -33,7 +33,7 @@ data(alclevels)
\describe{
- \item{Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday}{
+ \item{Mon, Tue, Wed, Thu, Fri, Sat, Sun}{
Day of the week.
@@ -89,7 +89,7 @@ data(alclevels)
}
\seealso{
\code{\link{rrvglm}},
- \code{\link{rcam}},
+ \code{\link{rcim}},
\code{\link{grc}}.
}
diff --git a/man/cratio.Rd b/man/cratio.Rd
index c0a4c77..e194610 100644
--- a/man/cratio.Rd
+++ b/man/cratio.Rd
@@ -7,8 +7,7 @@
regression model to an ordered (preferably) factor response.
}
\usage{
-cratio(link = "logit", earg = list(),
- parallel = FALSE, reverse = FALSE, zero = NULL,
+cratio(link = "logit", parallel = FALSE, reverse = FALSE, zero = NULL,
whitespace = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +18,6 @@ cratio(link = "logit", earg = list(),
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link function.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{parallel}{
A logical, or formula specifying which terms have
equal/unequal coefficients.
@@ -122,7 +116,8 @@ contains further information and examples.
}
\section{Warning }{
- No check is made to verify that the response is ordinal;
+ No check is made to verify that the response is ordinal if the
+ response is a matrix;
see \code{\link[base:factor]{ordered}}.
}
diff --git a/man/cumulative.Rd b/man/cumulative.Rd
index 169d289..d9cb751 100644
--- a/man/cumulative.Rd
+++ b/man/cumulative.Rd
@@ -9,11 +9,10 @@
}
\usage{
-cumulative(link = "logit", earg = list(), parallel = FALSE,
- reverse = FALSE, mv = FALSE, intercept.apply = FALSE,
- whitespace = FALSE)
+cumulative(link = "logit", parallel = FALSE, reverse = FALSE,
+ mv = FALSE, intercept.apply = FALSE, whitespace = FALSE)
}
-%scumulative(link="logit", earg = list(),
+%scumulative(link="logit",
% lscale="loge", escale = list(),
% parallel = FALSE, sparallel = TRUE, reverse = FALSE, iscale = 1)
%- maybe also 'usage' for other objects documented here.
@@ -33,16 +32,13 @@ cumulative(link = "logit", earg = list(), parallel = FALSE,
%
% }
- \item{earg}{
- List. Extra argument for the link function.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{parallel}{
- A logical or formula specifying which terms have
+ A logical or formula specifying which terms have
equal/unequal coefficients.
See below for more information about the parallelism assumption.
+ The default results in what some people call the
+ \emph{generalized ordered logit model} to be fitted.
}
@@ -170,9 +166,11 @@ Dobson, A. J. and Barnett, A. (2008)
\emph{An Introduction to Generalized Linear Models},
3rd ed. Boca Raton: Chapman & Hall/CRC Press.
+
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
Simonoff, J. S. (2003)
\emph{Analyzing Categorical Data},
New York: Springer-Verlag.
@@ -264,7 +262,8 @@ by the \pkg{VGAM} package can be found at
}
\section{Warning }{
- No check is made to verify that the response is ordinal;
+ No check is made to verify that the response is ordinal if the
+ response is a matrix;
see \code{\link[base:factor]{ordered}}.
@@ -292,9 +291,9 @@ by the \pkg{VGAM} package can be found at
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = TRUE, reverse = TRUE), pneumo))
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel = TRUE, reverse = TRUE), pneumo))
depvar(fit) # Sample proportions (good technique)
fit@y # Sample proportions (bad technique)
weights(fit, type = "prior") # Number of observations
@@ -302,23 +301,23 @@ coef(fit, matrix = TRUE)
constraints(fit) # Constraint matrices
# Check that the model is linear in let ----------------------
-fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
- cumulative(reverse = TRUE), pneumo)
+fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2),
+ cumulative(reverse = TRUE), pneumo)
\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2) }
# Check the proportional odds assumption with a LRT ----------
-(fit3 = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+(fit3 <- vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel = FALSE, reverse = TRUE), pneumo))
pchisq(2 * (logLik(fit3) - logLik(fit)),
df = length(coef(fit3)) - length(coef(fit)), lower.tail = FALSE)
lrtest(fit3, fit) # More elegant
# A factor() version of fit ----------------------------------
# This is in long format (cf. wide format above)
-Nobs = round(depvar(fit) * c(weights(fit, type = "prior")))
-sumNobs = colSums(Nobs) # apply(Nobs, 2, sum)
+Nobs <- round(depvar(fit) * c(weights(fit, type = "prior")))
+sumNobs <- colSums(Nobs) # apply(Nobs, 2, sum)
-pneumo.long =
+pneumo.long <-
data.frame(symptoms = ordered(rep(rep(colnames(Nobs), nrow(Nobs)),
times = c(t(Nobs))),
levels = colnames(Nobs)),
@@ -327,18 +326,18 @@ pneumo.long =
with(pneumo.long, table(let, symptoms)) # Check it; should be same as pneumo
-(fit.long1 = vglm(symptoms ~ let, data = pneumo.long,
+(fit.long1 <- vglm(symptoms ~ let, data = pneumo.long,
cumulative(parallel = TRUE, reverse = TRUE), trace = TRUE))
coef(fit.long1, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
# Could try using mustart if fit.long1 failed to converge.
-mymustart = matrix(sumNobs / sum(sumNobs),
+mymustart <- matrix(sumNobs / sum(sumNobs),
nrow(pneumo.long), ncol(Nobs), byrow = TRUE)
-fit.long2 = vglm(symptoms ~ let,
- fam = cumulative(parallel = TRUE, reverse = TRUE),
- mustart = mymustart, data = pneumo.long, trace = TRUE)
+fit.long2 <- vglm(symptoms ~ let,
+ fam = cumulative(parallel = TRUE, reverse = TRUE),
+ mustart = mymustart, data = pneumo.long, trace = TRUE)
coef(fit.long2, matrix = TRUE) # Should be same as coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
-% pneumo$let = log(pneumo$exposure.time)
+% pneumo$let <- log(pneumo$exposure.time)
diff --git a/man/dagum.Rd b/man/dagum.Rd
index 5601488..1f6714d 100644
--- a/man/dagum.Rd
+++ b/man/dagum.Rd
@@ -8,7 +8,6 @@
}
\usage{
dagum(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
- eshape1.a = list(), escale = list(), eshape2.p = list(),
ishape1.a = NULL, iscale = NULL, ishape2.p = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +18,6 @@ dagum(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eshape1.a, escale, eshape2.p}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1.a, iscale, ishape2.p}{
Optional initial values for \code{a}, \code{scale}, and \code{p}.
@@ -69,6 +63,7 @@ provided \eqn{-ap < 1 < a}; these are returned as the fitted values.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
diff --git a/man/dcennormal1.Rd b/man/dcennormal1.Rd
index ff23141..f768a73 100644
--- a/man/dcennormal1.Rd
+++ b/man/dcennormal1.Rd
@@ -9,7 +9,6 @@
}
\usage{
dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
- emu = list(), esd = list(),
imu = NULL, isd = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,8 +17,8 @@ dcennormal1(r1 = 0, r2 = 0, lmu = "identity", lsd = "loge",
Integers. Number of smallest and largest values censored, respectively.
}
- \item{lmu, lsd, emu, esd}{
- Parameter link functions and its extra arguments applied to the
+ \item{lmu, lsd}{
+ Parameter link functions applied to the
mean and standard deviation.
See \code{\link{Links}} for more choices.
diff --git a/man/dexpbinomial.Rd b/man/dexpbinomial.Rd
index fa500c4..92b0406 100644
--- a/man/dexpbinomial.Rd
+++ b/man/dexpbinomial.Rd
@@ -9,8 +9,8 @@
}
\usage{
-dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
- edispersion = list(), idispersion = 0.25, zero = 2)
+dexpbinomial(lmean = "logit", ldispersion = "logit",
+ idispersion = 0.25, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,11 +21,6 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
The defaults cause the parameters to be restricted to \eqn{(0,1)}.
}
- \item{emean, edispersion}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{idispersion}{
Initial value for the dispersion parameter.
If given, it must be in range, and is recyled to the necessary length.
@@ -62,6 +57,7 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
with respect to the binomial distribution.
See Efron (1986) for full details.
+
This \pkg{VGAM} family function implements an \emph{approximation}
(2.10) to the exact density (2.4). It replaces the normalizing
constant by unity since the true value nearly equals 1.
@@ -71,6 +67,7 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
the dispersion parameter can be modelled over a larger parameter space by
assigning the arguments \code{ldispersion} and \code{edispersion}.
+
Approximately, the mean (of \eqn{Y}) is \eqn{\mu}{mu}.
The \emph{effective sample size} is the dispersion parameter multiplied
by the original sample size,
@@ -79,11 +76,14 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
asymptotically independent because the expected information matrix
is diagonal.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}.
+
}
\references{
@@ -92,6 +92,7 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
\emph{Journal of the American Statistical Association},
\bold{81}, 709--721.
+
}
\author{ T. W. Yee }
@@ -100,14 +101,18 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
as \code{\link{binomialff}}, however multivariate responses are
not allowed (\code{binomialff(mv = FALSE)}).
+
}
\section{Warning }{
Numerical difficulties can occur; if so, try using \code{idispersion}.
+
}
\seealso{
\code{\link{binomialff}},
- \code{\link{toxop}}.
+ \code{\link{toxop}},
+ \code{\link{CommonVGAMffArguments}}.
+
}
\examples{
@@ -115,68 +120,67 @@ dexpbinomial(lmean = "logit", ldispersion = "logit", emean = list(),
# differ slightly.
# Scale the variables
-toxop = transform(toxop,
- phat = positive / ssize,
- srainfall = scale(rainfall), # (6.1)
- sN = scale(ssize)) # (6.2)
+toxop <- transform(toxop,
+ phat = positive / ssize,
+ srainfall = scale(rainfall), # (6.1)
+ sN = scale(ssize)) # (6.2)
# A fit similar (should be identical) to Section 6 of Efron (1986).
# But does not use poly(), and M=1.25 here, as in (5.3)
-cmlist = list("(Intercept)" = diag(2),
- "I(srainfall)" = rbind(1,0),
- "I(srainfall^2)" = rbind(1,0),
- "I(srainfall^3)" = rbind(1,0),
- "I(sN)" = rbind(0,1),
- "I(sN^2)" = rbind(0,1))
-dlist = list(min = 0, max = 1.25)
-fit = vglm(cbind(phat, 1 - phat) * ssize ~
- I(srainfall) + I(srainfall^2) + I(srainfall^3) +
- I(sN) + I(sN^2),
- dexpbinomial(ldisp = "elogit", idisp = 0.2,
- edisp = dlist, zero = NULL),
- toxop, trace = TRUE, constraints = cmlist)
+cmlist <- list("(Intercept)" = diag(2),
+ "I(srainfall)" = rbind(1,0),
+ "I(srainfall^2)" = rbind(1,0),
+ "I(srainfall^3)" = rbind(1,0),
+ "I(sN)" = rbind(0,1),
+ "I(sN^2)" = rbind(0,1))
+fit <- vglm(cbind(phat, 1 - phat) * ssize ~
+ I(srainfall) + I(srainfall^2) + I(srainfall^3) +
+ I(sN) + I(sN^2),
+ dexpbinomial(ldisp = elogit(min = 0, max = 1.25),
+ idisp = 0.2, zero = NULL),
+ toxop, trace = TRUE, constraints = cmlist)
# Now look at the results
coef(fit, matrix = TRUE)
head(fitted(fit))
summary(fit)
vcov(fit)
-sqrt(diag(vcov(fit))) # Standard errors
+sqrt(diag(vcov(fit))) # Standard errors
# Effective sample size (not quite the last column of Table 1)
head(predict(fit))
-Dispersion = elogit(predict(fit)[,2], earg = dlist, inverse = TRUE)
+Dispersion <- elogit(predict(fit)[,2], min = 0, max = 1.25, inverse = TRUE)
c(round(weights(fit, type = "prior") * Dispersion, dig = 1))
# Ordinary logistic regression (gives same results as (6.5))
-ofit = vglm(cbind(phat, 1 - phat) * ssize ~
- I(srainfall) + I(srainfall^2) + I(srainfall^3),
- binomialff, toxop, trace = TRUE)
+ofit <- vglm(cbind(phat, 1 - phat) * ssize ~
+ I(srainfall) + I(srainfall^2) + I(srainfall^3),
+ binomialff, toxop, trace = TRUE)
# Same as fit but it uses poly(), and can be plotted (cf. Figure 1)
-cmlist2 = list("(Intercept)" = diag(2),
- "poly(srainfall, degree = 3)" = rbind(1, 0),
- "poly(sN, degree = 2)" = rbind(0, 1))
-fit2 = vglm(cbind(phat, 1 - phat) * ssize ~
- poly(srainfall, degree = 3) + poly(sN, degree = 2),
- dexpbinomial(ldisp = "elogit", idisp = 0.2,
- edisp = dlist, zero = NULL),
- toxop, trace = TRUE, constraints = cmlist2)
+cmlist2 <- list("(Intercept)" = diag(2),
+ "poly(srainfall, degree = 3)" = rbind(1, 0),
+ "poly(sN, degree = 2)" = rbind(0, 1))
+fit2 <- vglm(cbind(phat, 1 - phat) * ssize ~
+ poly(srainfall, degree = 3) + poly(sN, degree = 2),
+ dexpbinomial(ldisp = elogit(min = 0, max = 1.25),
+ idisp = 0.2, zero = NULL),
+ toxop, trace = TRUE, constraints = cmlist2)
\dontrun{ par(mfrow = c(1, 2))
plotvgam(fit2, se = TRUE, lcol = "blue", scol = "red") # Cf. Figure 1
# Cf. Figure 1(a)
par(mfrow = c(1,2))
-ooo = with(toxop, sort.list(rainfall))
+ooo <- with(toxop, sort.list(rainfall))
with(toxop, plot(rainfall[ooo], fitted(fit2)[ooo], type = "l",
col = "blue", las = 1, ylim = c(0.3, 0.65)))
with(toxop, points(rainfall[ooo], fitted(ofit)[ooo], col = "red",
type = "b", pch = 19))
# Cf. Figure 1(b)
-ooo = with(toxop, sort.list(ssize))
+ooo <- with(toxop, sort.list(ssize))
with(toxop, plot(ssize[ooo], Dispersion[ooo], type = "l", col = "blue",
las = 1, xlim = c(0, 100))) }
}
diff --git a/man/df.residual.Rd b/man/df.residual.Rd
index a2ca589..0ec788c 100644
--- a/man/df.residual.Rd
+++ b/man/df.residual.Rd
@@ -19,7 +19,7 @@ df.residual_vlm(object, type = c("vlm", "lm"), \dots)
}
\item{type}{
the type of residual degrees-of-freedom wanted.
- In some applications the 'usual' LM-type value is requested.
+ In some applications the 'usual' LM-type value may be more appropriate.
The default is the first choice.
}
@@ -29,23 +29,28 @@ df.residual_vlm(object, type = c("vlm", "lm"), \dots)
}
}
\details{
- When a VGLM is fitted, a large ordinary least squares
- (OLS) fit is performed.
- The number of rows is \eqn{M} times the 'ordinary' number
- of rows of the LM-type model.
+ When a VGLM is fitted, a \emph{large} (VLM) generalized least
+ squares (GLS) fit is done at each IRLS iteration. To do this, an
+ ordinary least squares (OLS) fit is performed by
+ transforming the GLS using Cholesky factors.
+ The number of rows is \eqn{M} times the `ordinary' number
+ of rows of the LM-type model: \eqn{nM}.
Here, \eqn{M} is the number of linear/additive predictors.
- The formula for the VLM-type residual degrees-of-freedom
+ So the formula for the VLM-type residual degrees-of-freedom
is \eqn{nM - p^{*}} where \eqn{p^{*}} is the number of
- columns of the 'big' VLM matrix.
+ columns of the `big' VLM matrix.
The formula for the LM-type residual degrees-of-freedom
- is \eqn{n - p} where \eqn{p} is the number of
- columns of the 'ordinary' LM matrix.
+ is \eqn{n - p_{j}} where \eqn{p_{j}} is the number of
+ columns of the `ordinary' LM matrix corresponding
+ to the \eqn{j}th linear/additive predictor.
}
\value{
The value of the residual degrees-of-freedom extracted
from the object.
-
+ When \code{type = "vlm"} this is a single integer, and
+ when \code{type = "lm"} this is a \eqn{M}-vector of
+ integers.
}
\seealso{
@@ -59,16 +64,18 @@ df.residual_vlm(object, type = c("vlm", "lm"), \dots)
\examples{
pneumo <- transform(pneumo, let = log(exposure.time))
-(fit <- vglm(cbind(normal,mild,severe) ~ let, propodds, pneumo))
-model.matrix(fit)
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+head(model.matrix(fit, type = "vlm"))
+head(model.matrix(fit, type = "lm"))
-df.residual(fit, type = "vlm")
-nobs(fit, type = "vlm")
-nvar(fit, type = "vlm")
+df.residual(fit, type = "vlm") # n * M - p_VLM
+nobs(fit, type = "vlm") # n * M
+nvar(fit, type = "vlm") # p_VLM
-df.residual(fit, type = "lm") # This is more usual to some people
-nobs(fit, type = "lm")
-nvar(fit, type = "lm")
+df.residual(fit, type = "lm") # n - p_LM(j); Useful in some situations
+nobs(fit, type = "lm") # n
+nvar(fit, type = "lm") # p_LM
+nvar_vlm(fit, type = "lm") # p_LM(j) (<= p_LM elementwise)
}
\keyword{models}
diff --git a/man/dirichlet.Rd b/man/dirichlet.Rd
index bffd608..a97ea95 100644
--- a/man/dirichlet.Rd
+++ b/man/dirichlet.Rd
@@ -7,7 +7,7 @@
}
\usage{
-dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
+dirichlet(link = "loge", parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,15 +18,12 @@ dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
See \code{\link{Links}} for more choices.
The default gives \eqn{\eta_j=\log(\alpha_j)}{eta_j=log(alpha_j)}.
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{parallel, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -62,6 +59,7 @@ dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
alpha_{+}}, which are returned as the fitted values.
For this distribution Fisher scoring corresponds to Newton-Raphson.
+
The Dirichlet distribution can be motivated by considering the random variables
\eqn{(G_1,\ldots,G_{M})^T}{(G_1,\ldots,G_M)^T} which are each independent
and identically distributed as a gamma distribution with density
@@ -70,6 +68,7 @@ dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
Then the Dirichlet distribution arises when
\eqn{Y_j=G_j / (G_1 + \cdots + G_M)}{Y_j = G_j / (G_1 + ... + G_M)}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -77,19 +76,23 @@ dirichlet(link = "loge", earg=list(), parallel = FALSE, zero=NULL)
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
When fitted, the \code{fitted.values} slot of the object contains the
\eqn{M}-column matrix of means.
+
}
\references{
Lange, K. (2002)
\emph{Mathematical and Statistical Methods for Genetic Analysis},
2nd ed. New York: Springer-Verlag.
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
%Documentation accompanying the \pkg{VGAM} package at
%\url{http://www.stat.auckland.ac.nz/~yee}
%contains further information and examples.
@@ -103,6 +106,7 @@ New York: Wiley-Interscience, Third edition.
Another similar distribution to the Dirichlet is the
Dirichlet-multinomial (see \code{\link{dirmultinomial}}).
+
}
\seealso{
@@ -111,12 +115,14 @@ New York: Wiley-Interscience, Third edition.
\code{\link{multinomial}},
\code{\link{simplex}}.
+
}
\examples{
-y = rdiric(n=1000, shape=exp(c(-1,1,0)))
-fit = vglm(y ~ 1, dirichlet, trace = TRUE, crit="c")
+ydata <- data.frame(rdiric(n = 1000, shape = exp(c(-1, 1, 0))))
+colnames(ydata) <- paste("y", 1:3, sep = "")
+fit <- vglm(cbind(y1, y2, y3) ~ 1, dirichlet, ydata, trace = TRUE, crit = "coef")
Coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
head(fitted(fit))
}
\keyword{models}
diff --git a/man/dirmul.old.Rd b/man/dirmul.old.Rd
index e876613..5c906d6 100644
--- a/man/dirmul.old.Rd
+++ b/man/dirmul.old.Rd
@@ -7,7 +7,7 @@
non-negative integers.
}
\usage{
-dirmul.old(link = "loge", earg = list(),
+dirmul.old(link = "loge",
init.alpha = 0.01, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +19,6 @@ dirmul.old(link = "loge", earg = list(),
Here, \eqn{M} is the number of columns of the response matrix.
}
- \item{earg}{
- List. Extra argument for \code{link}.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.alpha}{
Numeric vector. Initial values for the
\code{alpha} vector. Must be positive.
@@ -149,8 +144,8 @@ fit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
(sfit <- summary(fit))
vcov(sfit)
-round(eta2theta(coef(fit), fit@misc$link), dig = 2) # not preferred
-round(Coef(fit), dig = 2) # preferred # preferred
+round(eta2theta(coef(fit), fit@misc$link, fit@misc$earg), dig = 2) # not preferred
+round(Coef(fit), dig = 2) # preferred
round(t(fitted(fit)), dig = 4) # 2nd row of Table 3.5 of Lange (2002)
coef(fit, matrix = TRUE)
@@ -159,8 +154,9 @@ pfit <- vglm(cbind(Allele5,Allele6,Allele7,Allele8,Allele9,
Allele10,Allele11,Allele12) ~ 1,
dirmul.old(parallel = TRUE), trace = TRUE,
data = alleleCounts)
-round(eta2theta(coef(pfit), pfit@misc$link), dig = 2) # not preferred
-round(Coef(pfit), dig = 2) # preferred
+round(eta2theta(coef(pfit, matrix = TRUE), pfit@misc$link,
+ pfit@misc$earg), dig = 2) # 'Right' answer
+round(Coef(pfit), dig = 2) # 'Wrong' answer due to parallelism constraint
}
\keyword{models}
\keyword{regression}
diff --git a/man/dirmultinomial.Rd b/man/dirmultinomial.Rd
index 960d0dd..68daa9d 100644
--- a/man/dirmultinomial.Rd
+++ b/man/dirmultinomial.Rd
@@ -7,8 +7,7 @@
}
\usage{
-dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
- parallel= FALSE, zero="M")
+dirmultinomial(lphi = "logit", iphi = 0.10, parallel = FALSE, zero = "M")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,17 +16,14 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
parameter, which lies in the open unit interval \eqn{(0,1)}.
See \code{\link{Links}} for more choices.
- }
- \item{ephi}{
- List. Extra argument for \code{lphi}.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{iphi}{
Numeric. Initial value for \eqn{\phi}{phi}.
Must be in the open unit interval \eqn{(0,1)}.
- If a failure to converge occurs try assigning this argument a different
- value.
+ If a failure to converge occurs then try assigning this argument
+ a different value.
+
}
\item{parallel}{
@@ -36,8 +32,8 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
are to be equal via equal coefficients.
Note \eqn{\pi_M}{pi_M} will generally be different from the
other probabilities.
- Setting \code{parallel=TRUE} will only work if you also set
- \code{zero=NULL} because of interference between these arguments
+ Setting \code{parallel = TRUE} will only work if you also set
+ \code{zero = NULL} because of interference between these arguments
(with respect to the intercept term).
}
@@ -48,7 +44,7 @@ dirmultinomial(lphi="logit", ephi = list(), iphi = 0.10,
If the character \code{"M"} then this means the numerical value
\eqn{M}, which corresponds to linear/additive predictor associated
with \eqn{\phi}{phi}.
- Setting \code{zero=NULL} means none of the values from
+ Setting \code{zero = NULL} means none of the values from
the set \eqn{\{1,2,\ldots,M\}}.
}
@@ -176,21 +172,23 @@ Overdispersion in allelic counts and \eqn{\theta}-correction in forensic genetic
\code{\link{dirichlet}},
\code{\link{multinomial}}.
+
}
\examples{
-n <- 10; M <- 5
-y <- round(matrix(runif(n*M)*10, n, M)) # Integer counts
-fit <- vglm(y ~ 1, dirmultinomial, trace = TRUE)
+nn <- 10; M <- 5
+ydata <- data.frame(round(matrix(runif(nn * M, max = 10), nn, M))) # Integer counts
+colnames(ydata) <- paste("y", 1:M, sep = "")
+
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ 1, dirmultinomial, ydata, trace = TRUE)
head(fitted(fit))
-fit at y # Sample proportions
+depvar(fit) # Sample proportions
weights(fit, type = "prior", matrix = FALSE) # Total counts per row
-x <- runif(n)
-fit <- vglm(y ~ x, dirmultinomial, trace = TRUE)
-\dontrun{
-Coef(fit) # This does not work
-}
+ydata <- transform(ydata, x2 = runif(nn))
+fit <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2, dirmultinomial, ydata, trace = TRUE)
+\dontrun{ # This does not work:
+Coef(fit) }
coef(fit, matrix = TRUE)
(sfit <- summary(fit))
vcov(sfit)
diff --git a/man/eexpUC.Rd b/man/eexpUC.Rd
index 87caae2..f25ac0f 100644
--- a/man/eexpUC.Rd
+++ b/man/eexpUC.Rd
@@ -117,11 +117,11 @@ lines(yy, pexp(yy), col = "darkgreen", lty = "dotted", lwd = 2) }
%yy = rexp(nn, rate=myrate)
%(myexp = qeexp(my_p, rate=myrate))
%sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
-%peexp(-Inf, rate=myrate) # Should be 0
-%peexp( Inf, rate=myrate) # Should be 1
+%peexp(-Inf, rate = myrate) # Should be 0
+%peexp( Inf, rate = myrate) # Should be 1
%peexp(mean(yy), rate=myrate) # Should be 0.5
%abs(qeexp(0.5, rate=myrate) - mean(yy)) # Should be 0
-%abs(peexp(myexp, rate=myrate) - my_p) # Should be 0
+%abs(peexp(myexp, rate=myrate) - my_p) # Should be 0
%integrate(f = deexp, lower=-1, upper = Inf, rate=myrate) # Should be 1
diff --git a/man/enzyme.Rd b/man/enzyme.Rd
index a8743dc..7991237 100644
--- a/man/enzyme.Rd
+++ b/man/enzyme.Rd
@@ -15,9 +15,13 @@
}
\details{
Sorry, more details need to be included later.
+
+
}
\source{
Sorry, more details need to be included later.
+
+
}
\references{
Watts, D. G. (1981)
@@ -26,12 +30,14 @@ Watts, D. G. (1981)
\emph{Kinetic Data Analysis: Design and Analysis of Enzyme and
Pharmacokinetic Experiments}, pp.1--24.
New York: Plenum Press.
+
+
}
\seealso{
\code{\link{micmen}}.
}
\examples{
-fit = vglm(velocity ~ 1, micmen, data=enzyme, trace = TRUE,
+fit <- vglm(velocity ~ 1, micmen, data = enzyme, trace = TRUE,
form2 = ~ conc - 1, crit = "crit")
summary(fit)
}
diff --git a/man/erf.Rd b/man/erf.Rd
index 6980a57..32109e4 100644
--- a/man/erf.Rd
+++ b/man/erf.Rd
@@ -4,6 +4,7 @@
\title{ Error Function }
\description{
Computes the error function based on the normal distribution.
+
}
\usage{
erf(x)
diff --git a/man/erlang.Rd b/man/erlang.Rd
index 18d0456..46a0791 100644
--- a/man/erlang.Rd
+++ b/man/erlang.Rd
@@ -7,7 +7,7 @@
by maximum likelihood estimation.
}
\usage{
-erlang(shape.arg, link = "loge", earg=list(), imethod = 1)
+erlang(shape.arg, link = "loge", imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,17 +21,11 @@ erlang(shape.arg, link = "loge", earg=list(), imethod = 1)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
+ \item{imethod, zero}{
+ See \code{\link{CommonVGAMffArguments}} for more details.
}
- \item{imethod}{
- An integer with value \code{1} or \code{2} which
- specifies the initialization method. If failure to converge occurs
- try the other value.
- }
}
\details{
The Erlang distribution is a special case of the gamma distribution
@@ -41,6 +35,7 @@ erlang(shape.arg, link = "loge", earg=list(), imethod = 1)
the sum of \code{shape.arg} independent and identically distributed
exponential random variates.
+
The probability density function of the Erlang
distribution is given by
\deqn{f(y) = \exp(-y/scale) y^{shape-1} scale^{-shape} / \Gamma(shape)}{%
@@ -56,28 +51,34 @@ erlang(shape.arg, link = "loge", earg=list(), imethod = 1)
The linear/additive predictor, by default, is
\eqn{\eta=\log(scale)}{eta=log(scale)}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Most standard texts on statistical distributions describe
this distribution, e.g.,
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\note{
+ Multiple responses are permitted.
The \code{rate} parameter found in \code{\link{gamma2.ab}}
is \code{1/scale} here---see also \code{\link[stats]{rgamma}}.
+
}
\seealso{
@@ -86,11 +87,11 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-rate = exp(2); myshape = 3
-edata = data.frame(y = rep(0, nn <- 1000))
+rate <- exp(2); myshape = 3
+edata <- data.frame(y = rep(0, nn <- 1000))
for(ii in 1:myshape)
- edata = transform(edata, y = y + rexp(nn, rate = rate))
-fit = vglm(y ~ 1, erlang(shape = myshape), edata, trace = TRUE)
+ edata <- transform(edata, y = y + rexp(nn, rate = rate))
+fit <- vglm(y ~ 1, erlang(shape = myshape), edata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit) # Answer = 1/rate
1/rate
diff --git a/man/eunifUC.Rd b/man/eunifUC.Rd
index ff543c9..8a88160 100644
--- a/man/eunifUC.Rd
+++ b/man/eunifUC.Rd
@@ -158,19 +158,19 @@ peunif(mean(yy), mymin, mymax) # Should be 0.5
abs(qeunif(0.5, mymin, mymax) - mean(yy)) # Should be 0
abs(qeunif(0.5, mymin, mymax) - (mymin+mymax)/2) # Should be 0
abs(peunif(myexp, mymin, mymax) - my_p) # Should be 0
-integrate(f = deunif, lower=mymin - 3, upper = mymax + 3,
- min=mymin, max=mymax) # Should be 1
+integrate(f = deunif, lower = mymin - 3, upper = mymax + 3,
+ min = mymin, max = mymax) # Should be 1
\dontrun{
-par(mfrow=c(2,1))
-yy = seq(0.0, 1.0, len=nn)
-plot(yy, deunif(yy), type="l", col="blue", ylim = c(0, 2),
+par(mfrow = c(2,1))
+yy = seq(0.0, 1.0, len = nn)
+plot(yy, deunif(yy), type = "l", col = "blue", ylim = c(0, 2),
xlab = "y", ylab = "g(y)", main = "g(y) for Uniform(0,1)")
-lines(yy, dunif(yy), col="darkgreen", lty="dotted", lwd = 2) # 'original'
+lines(yy, dunif(yy), col = "darkgreen", lty = "dotted", lwd = 2) # 'original'
-plot(yy, peunif(yy), type="l", col="blue", ylim = 0:1,
+plot(yy, peunif(yy), type = "l", col = "blue", ylim = 0:1,
xlab = "y", ylab = "G(y)", main = "G(y) for Uniform(0,1)")
-abline(a=0.0, b=1.0, col="darkgreen", lty="dotted", lwd = 2)
-abline(v=0.5, h=0.5, col="red", lty="dashed") }
+abline(a = 0.0, b = 1.0, col = "darkgreen", lty = "dotted", lwd = 2)
+abline(v = 0.5, h = 0.5, col = "red", lty = "dashed") }
}
\keyword{distribution}
diff --git a/man/expexp.Rd b/man/expexp.Rd
index 4aac939..3cb8ddb 100644
--- a/man/expexp.Rd
+++ b/man/expexp.Rd
@@ -8,7 +8,7 @@
}
\usage{
-expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
+expexp(lshape = "loge", lscale = "loge",
ishape = 1.1, iscale = NULL, tolerance = 1.0e-6, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,11 +20,6 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
The defaults ensure both parameters are positive.
}
- \item{eshape, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape}{
Initial value for the \eqn{\alpha}{shape}
parameter. If convergence fails try setting a different
@@ -71,17 +66,20 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
scale^2}
where \eqn{\psi'}{psi'} is the trigamma function.
+
This distribution has been called the two-parameter generalized
exponential distribution by Gupta and Kundu (2006).
A special case of the exponentiated exponential distribution:
\eqn{\alpha=1}{shape=1} is the exponential distribution.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Gupta, R. D. and Kundu, D. (2001)
@@ -91,6 +89,7 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
\bold{43},
117--130.
+
Gupta, R. D. and Kundu, D. (2006)
On the comparison of Fisher information of the
Weibull and GE distributions,
@@ -98,6 +97,7 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
\bold{136},
3130--3144.
+
}
\author{ T. W. Yee }
\note{
@@ -107,9 +107,11 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
Also, I have yet to implement Type-I right censored data using the
results of Gupta and Kundu (2006).
+
Another algorithm for fitting this model is implemented in
\code{\link{expexp1}}.
+
}
\section{Warning }{
Practical experience shows that reasonably good initial values really
@@ -120,6 +122,7 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
The algorithm may fail if the estimate of the shape parameter is
too close to unity.
+
}
\seealso{
@@ -128,11 +131,12 @@ expexp(lshape = "loge", lscale = "loge", eshape = list(), escale = list(),
\code{\link{weibull}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
# A special case: exponential data
-y = rexp(n <- 1000)
-fit = vglm(y ~ 1, fam = expexp, trace = TRUE, maxit = 99)
+edata <- data.frame(y = rexp(n <- 1000))
+fit = vglm(y ~ 1, fam = expexp, edata, trace = TRUE, maxit = 99)
coef(fit, matrix=TRUE)
Coef(fit)
@@ -142,8 +146,8 @@ bbearings = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
128.04, 173.40)
-fit = vglm(bbearings ~ 1, fam = expexp(iscale = 0.05, ish = 5),
- trace = TRUE, maxit = 300)
+fit <- vglm(bbearings ~ 1, fam = expexp(iscale = 0.05, ish = 5),
+ trace = TRUE, maxit = 300)
coef(fit, matrix = TRUE)
Coef(fit) # Authors get c(shape=5.2589, scale=0.0314)
logLik(fit) # Authors get -112.9763
diff --git a/man/expexp1.Rd b/man/expexp1.Rd
index 6efb6ad..69b3bf8 100644
--- a/man/expexp1.Rd
+++ b/man/expexp1.Rd
@@ -8,7 +8,7 @@
}
\usage{
-expexp1(lscale = "loge", escale = list(), iscale = NULL, ishape = 1)
+expexp1(lscale = "loge", iscale = NULL, ishape = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,11 +17,6 @@ expexp1(lscale = "loge", escale = list(), iscale = NULL, ishape = 1)
See \code{\link{Links}} for more choices.
}
- \item{escale}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale}{
Initial value for the \eqn{\lambda}{scale} parameter.
By default, an initial value is chosen internally using \code{ishape}.
@@ -43,13 +38,17 @@ expexp1(lscale = "loge", escale = list(), iscale = NULL, ishape = 1)
Newton-Raphson is used, which compares with Fisher scoring with
\code{\link{expexp}}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
+
Gupta, R. D. and Kundu, D. (2001)
Exponentiated exponential family: an alternative to
gamma and Weibull distributions,
@@ -57,6 +56,8 @@ expexp1(lscale = "loge", escale = list(), iscale = NULL, ishape = 1)
\bold{43},
117--130.
+
+
}
\author{ T. W. Yee }
@@ -64,36 +65,43 @@ expexp1(lscale = "loge", escale = list(), iscale = NULL, ishape = 1)
This family function works only for intercept-only models,
i.e., \code{y ~ 1} where \code{y} is the response.
+
The estimate of \eqn{\alpha}{shape} is attached to the
\code{misc} slot of the object, which is a list and contains
the component \code{shape}.
+
As Newton-Raphson is used, the working weights are sometimes
negative, and some adjustment is made to these to make them
positive.
+
Like \code{\link{expexp}}, good initial
values are needed. Convergence may be slow.
+
+
}
\section{Warning }{The standard errors produced by a
\code{summary} of the model may be wrong.
+
}
\seealso{
\code{\link{expexp}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
# Ball bearings data (number of million revolutions before failure)
-bbearings = data.frame(y = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
+bbearings <- data.frame(y = c(17.88, 28.92, 33.00, 41.52, 42.12, 45.60,
48.80, 51.84, 51.96, 54.12, 55.56, 67.80, 68.64, 68.64,
68.88, 84.12, 93.12, 98.64, 105.12, 105.84, 127.92,
128.04, 173.40))
-fit = vglm(y ~ 1, expexp1(ishape = 4), bbearings, trace = TRUE,
+fit <- vglm(y ~ 1, expexp1(ishape = 4), bbearings, trace = TRUE,
maxit = 50, checkwz = FALSE)
coef(fit, matrix = TRUE)
Coef(fit) # Authors get c(0.0314, 5.2589) with log-lik -112.9763
@@ -102,14 +110,14 @@ logLik(fit)
# Failure times of the airconditioning system of an airplane
-acplane = data.frame(y = c(23, 261, 87, 7, 120, 14, 62, 47,
+acplane <- data.frame(y = c(23, 261, 87, 7, 120, 14, 62, 47,
225, 71, 246, 21, 42, 20, 5, 12, 120, 11, 3, 14,
71, 11, 14, 11, 16, 90, 1, 16, 52, 95))
-fit = vglm(y ~ 1, expexp1(ishape = 0.8), acplane, trace = TRUE,
+fit <- vglm(y ~ 1, expexp1(ishape = 0.8), acplane, trace = TRUE,
maxit = 50, checkwz = FALSE)
coef(fit, matrix = TRUE)
Coef(fit) # Authors get c(0.0145, 0.8130) with log-lik -152.264
-fit at misc$shape # Estimate of shape
+fit at misc$shape # Estimate of shape
logLik(fit)
}
\keyword{models}
diff --git a/man/expgeometric.Rd b/man/expgeometric.Rd
index efbbd34..dccb109 100644
--- a/man/expgeometric.Rd
+++ b/man/expgeometric.Rd
@@ -9,7 +9,6 @@
}
\usage{
expgeometric(lscale = "loge", lshape = "logit",
- escale = list(), eshape = list(),
iscale = NULL, ishape = NULL,
tol12 = 1e-05, zero = 1, nsimEIM = 400)
}
@@ -20,11 +19,6 @@ expgeometric(lscale = "loge", lshape = "logit",
See \code{\link{Links}} for more choices.
}
- \item{escale, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, ishape}{
Numeric.
Optional initial values for the scale and shape parameters.
@@ -84,6 +78,7 @@ expgeometric(lscale = "loge", lshape = "logit",
}
\examples{
+\dontrun{
scale = exp(2); shape = logit(-1, inverse = TRUE);
edata = data.frame(y = rexpgeom(n = 2000, scale = scale, shape = shape))
fit = vglm(y ~ 1, expgeometric, edata, trace = TRUE)
@@ -92,6 +87,7 @@ coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/explink.Rd b/man/explink.Rd
index e48bdcf..df8ed3f 100644
--- a/man/explink.Rd
+++ b/man/explink.Rd
@@ -8,7 +8,7 @@
}
\usage{
-explink(theta, earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+explink(theta, bvalue = NULL, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,31 +17,21 @@ explink(theta, earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FA
See below for further details.
}
- \item{earg}{
- Optional list.
- See \code{\link{Links}} for general information about \code{earg}.
+% \item{earg}{
+% Optional list.
+% See \code{\link{Links}} for general information about \code{earg}.
+% }
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
- The inverse function is the \code{\link{loge}} function.
+ \item{bvalue}{
+ See \code{cloglog}.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
@@ -52,10 +42,6 @@ explink(theta, earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FA
\code{0}, \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
-
-
}
@@ -86,12 +72,12 @@ explink(theta, earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FA
\note{
This function has particular use for computing quasi-variances when
- used with \code{\link{rcam}} and \code{\link{normal1}}.
+ used with \code{\link{rcim}} and \code{\link{normal1}}.
Numerical instability may occur when \code{theta} is
close to negative or positive infinity.
- One way of overcoming this (one day) is to use \code{earg}.
+ One way of overcoming this (one day) is to use \code{bvalue}.
}
@@ -99,13 +85,13 @@ explink(theta, earg = list(), inverse = FALSE, deriv = 0, short = TRUE, tag = FA
\seealso{
\code{\link{Links}},
\code{\link{loge}},
- \code{\link{rcam}},
+ \code{\link{rcim}},
\code{\link{Qvar}},
\code{\link{normal1}}.
}
\examples{
-theta = rnorm(30)
+theta <- rnorm(30)
explink(theta)
max(abs(explink(explink(theta), inverse = TRUE) - theta)) # Should be 0
}
diff --git a/man/explogarithmic.Rd b/man/explogarithmic.Rd
index be89376..3235c1d 100644
--- a/man/explogarithmic.Rd
+++ b/man/explogarithmic.Rd
@@ -8,13 +8,13 @@
}
\usage{
-explogarithmic(lscale = "loge", lshape = "logit", escale = list(),
- eshape = list(), iscale = NULL, ishape = NULL,
+explogarithmic(lscale = "loge", lshape = "logit",
+ iscale = NULL, ishape = NULL,
tol12 = 1e-05, zero = 1, nsimEIM = 400)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lscale, lshape, escale, eshape}{
+ \item{lscale, lshape}{
See \code{\link{CommonVGAMffArguments}} for information.
@@ -78,7 +78,7 @@ explogarithmic(lscale = "loge", lshape = "logit", escale = list(),
}
\examples{
-scale = exp(2); shape = logit(-1, inverse = TRUE);
+\dontrun{ scale = exp(2); shape = logit(-1, inverse = TRUE);
edata = data.frame(y = rexplog(n = 2000, scale = scale, shape = shape))
fit = vglm(y ~ 1, explogarithmic, edata, trace = TRUE)
c(with(edata, median(y)), head(fitted(fit), 1))
@@ -86,6 +86,7 @@ coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/exponential.Rd b/man/exponential.Rd
index 70d9ebb..8e1ea9f 100644
--- a/man/exponential.Rd
+++ b/man/exponential.Rd
@@ -7,7 +7,8 @@
}
\usage{
-exponential(link = "loge", earg = list(), location = 0, expected = TRUE)
+exponential(link = "loge", location = 0, expected = TRUE,
+ shrinkage.init = 0.95, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,11 +17,6 @@ exponential(link = "loge", earg = list(), location = 0, expected = TRUE)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{location}{
Numeric of length 1, the known location parameter, \eqn{A}, say.
@@ -30,6 +26,10 @@ exponential(link = "loge", earg = list(), location = 0, expected = TRUE)
otherwise Newton-Raphson. The latter is usually faster.
}
+ \item{shrinkage.init, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+ }
}
\details{
@@ -51,6 +51,7 @@ exponential(link = "loge", earg = list(), location = 0, expected = TRUE)
}
\references{
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
@@ -60,7 +61,7 @@ New York: Wiley-Interscience, Third edition.
\author{ T. W. Yee }
\note{
- Suppose \eqn{A=0}.
+ Suppose \eqn{A = 0}.
For a fixed time interval, the number of events is
Poisson with mean \eqn{\lambda}{rate} if the time
between events has a
@@ -82,6 +83,7 @@ New York: Wiley-Interscience, Third edition.
\code{\link{mix2exp}},
\code{\link{freund61}}.
+
}
\examples{
@@ -94,7 +96,7 @@ with(edata, stem(y))
fit.slow <- vglm(y ~ x2 + x3, exponential, edata, trace = TRUE, crit = "c")
fit.fast <- vglm(y ~ x2 + x3, exponential(exp = FALSE), edata,
- trace = TRUE, crit = "c")
+ trace = TRUE, crit = "coef")
coef(fit.slow, mat = TRUE)
summary(fit.slow)
}
diff --git a/man/exppoisson.Rd b/man/exppoisson.Rd
index a38653a..8dc698c 100644
--- a/man/exppoisson.Rd
+++ b/man/exppoisson.Rd
@@ -8,9 +8,8 @@
}
\usage{
-exppoisson(llambda = "loge", lbetave = "loge", elambda = list(),
- ebetave = list(), ilambda = 1.1, ibetave = 2,
- zero = NULL)
+exppoisson(llambda = "loge", lbetave = "loge",
+ ilambda = 1.1, ibetave = 2, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +18,6 @@ exppoisson(llambda = "loge", lbetave = "loge", elambda = list(),
See \code{\link{Links}} for more choices.
}
- \item{elambda, ebetave}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilambda, ibetave}{
Numeric.
Initial values for the \code{lambda} and \code{betave} parameters.
diff --git a/man/felix.Rd b/man/felix.Rd
index 8f7051e..594b822 100644
--- a/man/felix.Rd
+++ b/man/felix.Rd
@@ -8,20 +8,21 @@
}
\usage{
-felix(link = "elogit", earg = if (link == "elogit") list(min
- = 0, max = 0.5) else list(), imethod=1)
+felix(link = elogit(min = 0, max = 0.5), imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument for the parameter.
- See \code{\link{Links}} for more choices and for general information.
+ \item{link}{
+ Link function for the parameter;
+ see \code{\link{Links}} for more choices and for general information.
+
}
\item{imethod}{
See \code{\link{CommonVGAMffArguments}}.
Valid values are 1, 2, 3 or 4.
+
}
}
\details{
@@ -60,10 +61,11 @@ Boston: Birkhauser.
\code{\link{dfelix}},
\code{\link{borel.tanner}}.
+
}
\examples{
-fdata = data.frame(y = 2*rpois(n=200, 1) + 1) # Not real data!
-fit = vglm(y ~ 1, felix, fdata, trace=TRUE, crit="c")
+fdata <- data.frame(y = 2*rpois(n = 200, 1) + 1) # Not real data!
+fit <- vglm(y ~ 1, felix, fdata, trace = TRUE, crit = "c")
coef(fit, matrix=TRUE)
Coef(fit)
summary(fit)
diff --git a/man/felixUC.Rd b/man/felixUC.Rd
index c5887cc..6c5908e 100644
--- a/man/felixUC.Rd
+++ b/man/felixUC.Rd
@@ -13,10 +13,10 @@
}
\usage{
-dfelix(x, a=0.25, log=FALSE)
-%pfelix(q, a=0.25)
-%qfelix(p, a=0.25)
-%rfelix(n, a=0.25)
+dfelix(x, a = 0.25, log = FALSE)
+%pfelix(q, a = 0.25)
+%qfelix(p, a = 0.25)
+%rfelix(n, a = 0.25)
}
\arguments{
\item{x}{vector of quantiles.}
diff --git a/man/fff.Rd b/man/fff.Rd
index e1f092a..ecb1f3d 100644
--- a/man/fff.Rd
+++ b/man/fff.Rd
@@ -6,8 +6,8 @@
Maximum likelihood estimation of the (2-parameter) F distribution.
}
\usage{
-fff(link="loge", earg=list(), idf1=NULL, idf2=NULL, nsimEIM=100,
- imethod=1, zero=NULL)
+fff(link = "loge", idf1 = NULL, idf2 = NULL, nsimEIM = 100,
+ imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,35 +17,34 @@ fff(link="loge", earg=list(), idf1=NULL, idf2=NULL, nsimEIM=100,
The default keeps the parameters positive.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{idf1, idf2}{
Numeric and positive.
Initial value for the parameters.
The default is to choose each value internally.
}
- \item{nsimEIM}{
+ \item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{imethod}{
Initialization method. Either the value 1 or 2.
If both fail try setting values for \code{idf1} and \code{idf2}.
- }
- \item{zero}{
- An integer-valued vector specifying which
- linear/additive predictors are modelled as intercepts only.
- The value must be from the set \{1,2\}, corresponding
- respectively to \eqn{df1} and \eqn{df2}.
- By default all linear/additive predictors are modelled as
- a linear combination of the explanatory variables.
}
+% \item{zero}{
+% An integer-valued vector specifying which
+% linear/additive predictors are modelled as intercepts only.
+% The value must be from the set \{1,2\}, corresponding
+% respectively to \eqn{df1} and \eqn{df2}.
+% By default all linear/additive predictors are modelled as
+% a linear combination of the explanatory variables.
+%
+%
+% }
+
}
\details{
The F distribution is named after Fisher and has a density function
@@ -60,8 +59,9 @@ fff(link="loge", earg=list(), idf1=NULL, idf2=NULL, nsimEIM=100,
The estimated mean is returned as the fitted values.
Although the F distribution can be defined to accommodate a
non-centrality parameter \code{ncp}, it is assumed zero here.
- Actually it shouldn't be too difficult to handle any known \code{ncp}; something
- to do in the short future.
+ Actually it shouldn't be too difficult to handle any known
+ \code{ncp}; something to do in the short future.
+
}
\value{
@@ -69,18 +69,23 @@ fff(link="loge", earg=list(), idf1=NULL, idf2=NULL, nsimEIM=100,
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
+
}
\author{ T. W. Yee }
\section{Warning}{
Numerical problems will occur when the estimates of the parameters
are too low or too high.
+
}
%\note{
@@ -93,14 +98,18 @@ New York: Wiley-Interscience, Third edition.
%}
\seealso{
\code{\link[stats:Fdist]{FDist}}.
+
+
}
\examples{
-x = runif(n <- 2000)
-df1 = exp(2+0.5*x)
-df2 = exp(2-0.5*x)
-y = rf(n, df1, df2)
-fit = vglm(y ~ x, fff, trace=TRUE)
-coef(fit, matrix=TRUE)
+\dontrun{
+fdata <- data.frame(x2 = runif(nn <- 2000))
+fdata <- transform(fdata, df1 = exp(2+0.5*x2),
+ df2 = exp(2-0.5*x2))
+fdata <- transform(fdata, y = rf(nn, df1, df2))
+fit <- vglm(y ~ x2, fff, fdata, trace = TRUE)
+coef(fit, matrix = TRUE)
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/fgm.Rd b/man/fgm.Rd
index 4d6f887..b5ba0d4 100644
--- a/man/fgm.Rd
+++ b/man/fgm.Rd
@@ -9,7 +9,7 @@
}
\usage{
-fgm(lapar="rhobit", earg=list(), iapar=NULL, imethod=1, nsimEIM=200)
+fgm(lapar="rhobit", iapar = NULL, imethod = 1, nsimEIM = 200)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +19,6 @@ fgm(lapar="rhobit", earg=list(), iapar=NULL, imethod=1, nsimEIM=200)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iapar}{
Numeric. Optional initial value for \eqn{\alpha}{alpha}.
By default, an initial value is chosen internally.
diff --git a/man/fisherz.Rd b/man/fisherz.Rd
index fc4dbe4..6dfd0e3 100644
--- a/man/fisherz.Rd
+++ b/man/fisherz.Rd
@@ -8,8 +8,8 @@
}
\usage{
-fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+fisherz(theta, bminvalue = NULL, bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,37 +17,26 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
Numeric or character.
See below for further details.
+
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
+ \item{bminvalue, bmaxvalue}{
+ Optional boundary values.
Values of \code{theta} which are less than or equal to \eqn{-1} can be
- replaced by the \code{bminvalue} component of the list \code{earg}
+ replaced by \code{bminvalue}
before computing the link function value.
Values of \code{theta} which are greater than or equal to \eqn{1} can be
- replaced by the \code{bmaxvalue} component of the list \code{earg}
+ replaced by \code{bmaxvalue}
before computing the link function value.
- See \code{\link{Links}} for general information about \code{earg}.
+ See \code{\link{Links}}.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
The \code{fisherz} link function is commonly used for parameters that
@@ -55,8 +44,9 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical values of \code{theta} close to \eqn{-1} or \eqn{1} or
out of range result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
+
+
}
\value{
For \code{deriv = 0},
@@ -64,28 +54,35 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
and if \code{inverse = TRUE} then
\code{(exp(2*theta)-1)/(exp(2*theta)+1)}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
\author{ Thomas W. Yee }
\note{
Numerical instability may occur when \code{theta} is close to \eqn{-1} or
\eqn{1}.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use, e.g., \code{bminvalue}.
+
The link function \code{\link{rhobit}} is very similar to \code{fisherz},
e.g., just twice the value of \code{fisherz}.
+
}
\seealso{
@@ -93,19 +90,19 @@ fisherz(theta, earg = list(), inverse = FALSE, deriv = 0,
\code{\link{rhobit}},
\code{\link{logit}}.
- }
-\examples{
-theta = seq(-0.99, 0.99, by=0.01)
-y = fisherz(theta)
-\dontrun{
-plot(theta, y, type="l", las=1, ylab="", main="fisherz(theta)")
-abline(v=0, h=0, lty=2)
-}
-x = c(seq(-1.02, -0.98, by=0.01), seq(0.97, 1.02, by=0.01))
-fisherz(x) # Has NAs
-fisherz(x, earg=list(bminvalue= -1 + .Machine$double.eps,
- bmaxvalue= 1 - .Machine$double.eps)) # Has no NAs
+}
+\examples{
+theta <- seq(-0.99, 0.99, by = 0.01)
+y <- fisherz(theta)
+\dontrun{ plot(theta, y, type = "l", las = 1, ylab = "",
+ main = "fisherz(theta)")
+abline(v = 0, h = 0, lty = 2) }
+
+x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
+fisherz(x) # Has NAs
+fisherz(x, bminvalue = -1 + .Machine$double.eps,
+ bmaxvalue = 1 - .Machine$double.eps) # Has no NAs
}
\keyword{math}
\keyword{models}
diff --git a/man/fisk.Rd b/man/fisk.Rd
index d46548b..3c33863 100644
--- a/man/fisk.Rd
+++ b/man/fisk.Rd
@@ -8,8 +8,8 @@
}
\usage{
-fisk(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
- escale = list(), ishape1.a = NULL, iscale = NULL, zero = NULL)
+fisk(lshape1.a = "loge", lscale = "loge",
+ ishape1.a = NULL, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +19,6 @@ fisk(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
See \code{\link{Links}} for more choices.
}
- \item{eshape1.a, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
@@ -96,9 +91,9 @@ Hoboken, NJ: Wiley-Interscience.
}
\examples{
-fdata = data.frame(y = rfisk(n = 200, exp(1), exp(2)))
-fit = vglm(y ~ 1, fisk, fdata, trace = TRUE)
-fit = vglm(y ~ 1, fisk(ishape1.a = exp(1)), fdata, trace = TRUE)
+fdata <- data.frame(y = rfisk(n = 200, exp(1), exp(2)))
+fit <- vglm(y ~ 1, fisk, fdata, trace = TRUE)
+fit <- vglm(y ~ 1, fisk(ishape1.a = exp(1)), fdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/fiskUC.Rd b/man/fiskUC.Rd
index 3d8f4cc..2f045ae 100644
--- a/man/fiskUC.Rd
+++ b/man/fiskUC.Rd
@@ -9,6 +9,7 @@
Density, distribution function, quantile function and random
generation for the Fisk distribution with shape parameter \code{a}
and scale parameter \code{scale}.
+
}
\usage{
dfisk(x, shape1.a, scale = 1, log = FALSE)
diff --git a/man/fittedvlm.Rd b/man/fittedvlm.Rd
index 7679474..c9a16f8 100644
--- a/man/fittedvlm.Rd
+++ b/man/fittedvlm.Rd
@@ -8,17 +8,29 @@
inherits from a \emph{vector linear model} (VLM), e.g., a model of
class \code{"vglm"}.
+
}
\usage{
fittedvlm(object, matrix.arg = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{object}{ a model object that inherits from a VLM.
- }
- \item{matrix.arg}{ Logical. Return the answer as a matrix?
- If \code{FALSE} then it will be a vector. }
- \item{\dots}{ Currently unused. }
+ \item{object}{
+ a model object that inherits from a VLM.
+
+
+ }
+ \item{matrix.arg}{
+ Logical. Return the answer as a matrix?
+ If \code{FALSE} then it will be a vector.
+
+
+ }
+ \item{\dots}{
+ Currently unused.
+
+
+ }
}
\details{
@@ -28,11 +40,17 @@ fittedvlm(object, matrix.arg = TRUE, ...)
The mean may even not exist, e.g., for a Cauchy distribution.
+ Note that the fitted value is output from the \code{@linkinv} slot
+ of the \pkg{VGAM} family function,
+ where the \code{eta} argument is the \eqn{n \times M}{n x M} matrix
+ of linear predictors.
+
+
+
+
}
\value{
- The fitted values as returned by the
- \code{inverse} slot of the \pkg{VGAM} family function,
- evaluated at the final IRLS iteration.
+ The fitted values evaluated at the final IRLS iteration.
}
@@ -53,7 +71,8 @@ Chambers, J. M. and T. J. Hastie (eds) (1992)
If \code{fit} is a VLM or VGLM then \code{fitted(fit)} and
- \code{predict(fit, type="response")} should be equivalent.
+ \code{predict(fit, type = "response")} should be equivalent
+ (see \code{\link{predictvglm}}).
The latter has the advantage in that it handles a \code{newdata}
argument so that the fitted values can be computed for a
different data set.
@@ -75,11 +94,11 @@ pneumo = transform(pneumo, let = log(exposure.time))
fitted(fit)
# LMS quantile regression example 2
-fit = vgam(BMI ~ s(age, df = c(4,2)),
+fit = vgam(BMI ~ s(age, df = c(4, 2)),
fam = lms.bcn(zero = 1), data = bmi.nz, trace = TRUE)
-head(predict(fit, type = "r")) # The following three are equal
+head(predict(fit, type = "response")) # The following three are equal
head(fitted(fit))
-predict(fit, type = "r", newdata = head(bmi.nz))
+predict(fit, type = "response", newdata = head(bmi.nz))
}
\keyword{models}
\keyword{regression}
diff --git a/man/fnormUC.Rd b/man/fnormUC.Rd
index e24e6dc..8ebd1ed 100644
--- a/man/fnormUC.Rd
+++ b/man/fnormUC.Rd
@@ -11,10 +11,10 @@
}
\usage{
-dfnorm(x, mean=0, sd=1, a1=1, a2=1)
-pfnorm(q, mean=0, sd=1, a1=1, a2=1)
-qfnorm(p, mean=0, sd=1, a1=1, a2=1, ...)
-rfnorm(n, mean=0, sd=1, a1=1, a2=1)
+dfnorm(x, mean = 0, sd = 1, a1 = 1, a2 = 1)
+pfnorm(q, mean = 0, sd = 1, a1 = 1, a2 = 1)
+qfnorm(p, mean = 0, sd = 1, a1 = 1, a2 = 1, ...)
+rfnorm(n, mean = 0, sd = 1, a1 = 1, a2 = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -55,20 +55,20 @@ rfnorm(n, mean=0, sd=1, a1=1, a2=1)
}
\examples{
\dontrun{
-m = 1.5; SD=exp(0)
-x = seq(-1, 4, len=501)
-plot(x, dfnorm(x, m=m, sd=SD), type="l", ylim=0:1, las=1,
- ylab=paste("fnorm(m=", m, ", sd=", round(SD, dig=3), ")"), col="blue",
- main="Blue is density, red is cumulative distribution function",
- sub="Purple lines are the 10,20,...,90 percentiles")
-lines(x, pfnorm(x, m=m, sd=SD), col="red")
-abline(h=0)
-probs = seq(0.1, 0.9, by=0.1)
-Q = qfnorm(probs, m=m, sd=SD)
-lines(Q, dfnorm(Q, m=m, sd=SD), col="purple", lty=3, type="h")
-lines(Q, pfnorm(Q, m=m, sd=SD), col="purple", lty=3, type="h")
-abline(h=probs, col="purple", lty=3)
-max(abs(pfnorm(Q, m=m, sd=SD) - probs)) # Should be 0
+m <- 1.5; SD <- exp(0)
+x <- seq(-1, 4, len = 501)
+plot(x, dfnorm(x, m = m, sd = SD), type = "l", ylim = 0:1, las = 1,
+ ylab = paste("fnorm(m = ", m, ", sd = ", round(SD, dig = 3), ")"),
+ main = "Blue is density, red is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles", col = "blue")
+lines(x, pfnorm(x, m = m, sd = SD), col = "red")
+abline(h = 0)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qfnorm(probs, m = m, sd = SD)
+lines(Q, dfnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
+lines(Q, pfnorm(Q, m = m, sd = SD), col = "purple", lty = 3, type = "h")
+abline(h = probs, col = "purple", lty = 3)
+max(abs(pfnorm(Q, m = m, sd = SD) - probs)) # Should be 0
}
}
\keyword{distribution}
diff --git a/man/fnormal1.Rd b/man/fnormal1.Rd
index 006cb95..8755ca2 100644
--- a/man/fnormal1.Rd
+++ b/man/fnormal1.Rd
@@ -6,8 +6,8 @@
Fits a (generalized) folded (univariate) normal distribution.
}
\usage{
-fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
- isd=NULL, a1=1, a2=1, nsimEIM=500, imethod=1, zero=NULL)
+fnormal1(lmean = "identity", lsd = "loge", imean = NULL, isd = NULL,
+ a1 = 1, a2 = 1, nsimEIM = 500, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,14 +19,19 @@ fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
See \code{\link{Links}} for more choices.
}
- \item{emean, esd}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+
+% \item{emean, esd}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% emean=list(), esd=list(),
+% }
+
+
\item{imean, isd}{
Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
A \code{NULL} means a value is computed internally.
+ See \code{\link{CommonVGAMffArguments}}.
}
\item{a1, a2}{
@@ -81,6 +86,7 @@ fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Lin, P. C. (2005)
@@ -89,6 +95,7 @@ fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
\emph{International Journal of Advanced Manufacturing Technology},
\bold{26}, 825--830.
+
}
\author{ Thomas W. Yee }
\note{
@@ -101,6 +108,7 @@ fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
See \code{\link{CommonVGAMffArguments}} for general information about
many of these arguments.
+
}
\section{Warning }{
@@ -108,23 +116,28 @@ fnormal1(lmean="identity", lsd="loge", emean=list(), esd=list(), imean=NULL,
It is recommended that several different initial values be used
to help avoid local solutions.
+
}
\seealso{
\code{\link{rfnorm}},
\code{\link{normal1}},
\code{\link[stats:Normal]{dnorm}},
\code{\link{skewnormal1}}.
+
+
}
\examples{
-m = 2; SD = exp(1)
-y = rfnorm(n <- 1000, m=m, sd=SD)
-\dontrun{hist(y, prob=TRUE, main=paste("fnormal1(m=",m,", sd=",round(SD,2),")"))}
-fit = vglm(y ~ 1, fam=fnormal1, trace=TRUE)
-coef(fit, mat=TRUE)
-(Cfit = Coef(fit))
-mygrid = seq(min(y), max(y), len=200) # Add the fit to the histogram
-\dontrun{lines(mygrid, dfnorm(mygrid, Cfit[1], Cfit[2]), col="red")}
+\dontrun{ m <- 2; SD <- exp(1)
+y <- rfnorm(n <- 1000, m = m, sd = SD)
+hist(y, prob = TRUE, main = paste("fnormal1(m = ", m,
+ ", sd = ", round(SD, 2), ")"))
+fit <- vglm(y ~ 1, fam = fnormal1, trace = TRUE)
+coef(fit, matrix = TRUE)
+(Cfit <- Coef(fit))
+mygrid <- seq(min(y), max(y), len = 200) # Add the fit to the histogram
+lines(mygrid, dfnorm(mygrid, Cfit[1], Cfit[2]), col = "orange")
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/frank.Rd b/man/frank.Rd
index 52f2fd0..c8e6571 100644
--- a/man/frank.Rd
+++ b/man/frank.Rd
@@ -8,7 +8,7 @@
}
\usage{
-frank(lapar="loge", eapar=list(), iapar=2, nsimEIM=250)
+frank(lapar = "loge", iapar = 2, nsimEIM = 250)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,20 +17,18 @@ frank(lapar="loge", eapar=list(), iapar=2, nsimEIM=250)
\eqn{\alpha}{alpha}.
See \code{\link{Links}} for more choices.
- }
- \item{eapar}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{iapar}{
Numeric. Initial value for \eqn{\alpha}{alpha}.
If a convergence failure occurs try assigning a different value.
+
}
\item{nsimEIM}{
See \code{\link{CommonVGAMffArguments}}.
+
}
}
\details{
@@ -45,6 +43,7 @@ frank(lapar="loge", eapar=list(), iapar=2, nsimEIM=250)
Note the logarithm here is to base \eqn{\alpha}{alpha}.
The support of the function is the unit square.
+
When \eqn{0 < \alpha < 1}{0<alpha<1} the probability density function
\eqn{h_{\alpha}(y_1,y_2)}{h_{alpha}(y_1,y_2)}
is symmetric with respect to the lines \eqn{y_2=y_1}{y2=y1}
@@ -52,6 +51,7 @@ frank(lapar="loge", eapar=list(), iapar=2, nsimEIM=250)
When \eqn{\alpha > 1}{alpha>1} then
\eqn{h_{\alpha}(y_1,y_2) = h_{1/\alpha}(1-y_1,y_2)}{h_{1/alpha}(1-y_1,y_2)}.
+
If \eqn{\alpha=1}{alpha=1} then \eqn{H(y_1,y_2) = y_1 y_2}{H(y1,y2)=y1*y2},
i.e., uniform on the unit square.
As \eqn{\alpha}{alpha} approaches 0 then
@@ -59,16 +59,20 @@ frank(lapar="loge", eapar=list(), iapar=2, nsimEIM=250)
As \eqn{\alpha}{alpha} approaches infinity then
\eqn{H(y_1,y_2) = \max(0, y_1+y_2-1)}{H(y1,y2)=max(0,y1+y2-1)}.
+
The default is to use Fisher scoring implemented using
\code{\link{rfrank}}.
For intercept-only models an alternative is to set \code{nsimEIM=NULL}
so that a variant of Newton-Raphson is used.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
%% improve the references
@@ -79,31 +83,36 @@ Frank's family of bivariate distributions.
\emph{Biometrika},
\bold{74}, 549--555.
+
}
\author{ T. W. Yee }
\note{
- The response must be a two-column matrix. Currently, the fitted
+ The response must be a two-column matrix. Currently, the fitted
value is a matrix with two columns and values equal to a half.
This is because the marginal distributions correspond to a standard
uniform distribution.
+
}
\seealso{
\code{\link{rfrank}},
\code{\link{fgm}}.
+
}
\examples{
-ymat = rfrank(n=2000, alpha=exp(4))
-\dontrun{plot(ymat, col="blue")}
-fit = vglm(ymat ~ 1, fam=frank, trace=TRUE)
-coef(fit, matrix=TRUE)
+\dontrun{
+ymat <- rfrank(n = 2000, alpha = exp(4))
+plot(ymat, col = "blue")
+fit <- vglm(ymat ~ 1, fam = frank, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
head(fitted(fit))
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/frankUC.Rd b/man/frankUC.Rd
index d91e3f2..2f10fbc 100644
--- a/man/frankUC.Rd
+++ b/man/frankUC.Rd
@@ -10,7 +10,7 @@
}
\usage{
-dfrank(x1, x2, alpha, log=FALSE)
+dfrank(x1, x2, alpha, log = FALSE)
pfrank(q1, q2, alpha)
rfrank(n, alpha)
}
@@ -21,7 +21,7 @@ rfrank(n, alpha)
\item{alpha}{the positive association parameter \eqn{\alpha}{alpha}.}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -54,20 +54,19 @@ Frank's family of bivariate distributions.
}
\examples{
\dontrun{
-N = 100
-x = seq(-0.30, 1.30, len=N)
-alpha = 8
-ox = expand.grid(x, x)
-z = dfrank(ox[,1], ox[,2], alp=alpha)
+N <- 100; alpha <- 8
+x <- seq(-0.30, 1.30, len = N)
+ox <- expand.grid(x, x)
+z <- dfrank(ox[, 1], ox[, 2], alpha = alpha)
contour(x, x, matrix(z, N, N))
-z = pfrank(ox[,1], ox[,2], alp=alpha)
+z <- pfrank(ox[, 1], ox[, 2], alpha = alpha)
contour(x, x, matrix(z, N, N))
-alpha = exp(4)
-plot(r <- rfrank(n=3000, alpha=alpha))
-par(mfrow=c(1,2))
-hist(r[,1]) # Should be uniform
-hist(r[,2]) # Should be uniform
+alpha <- exp(4)
+plot(r <- rfrank(n = 3000, alpha = alpha))
+par(mfrow = c(1, 2))
+hist(r[, 1]) # Should be uniform
+hist(r[, 2]) # Should be uniform
}
}
\keyword{distribution}
diff --git a/man/frechet.Rd b/man/frechet.Rd
index 6a3d886..3bebcbc 100644
--- a/man/frechet.Rd
+++ b/man/frechet.Rd
@@ -12,12 +12,11 @@
}
\usage{
-frechet2(location = 0, lscale = "loge", lshape = "logoff",
- escale = list(), eshape = list(offset = -2), iscale = NULL,
- ishape = NULL, nsimEIM = 250, zero = NULL)
+frechet2(location = 0, lscale = "loge", lshape = logoff(offset = -2),
+ iscale = NULL, ishape = NULL, nsimEIM = 250, zero = NULL)
%frechet3(anchor = NULL, ldifference = "loge", lscale = "loge",
-% lshape = "loglog", edifference = list(), escale = list(),
-% eshape = list(), ilocation = NULL, iscale = NULL, ishape = NULL,
+% lshape = "loglog",
+% ilocation = NULL, iscale = NULL, ishape = NULL,
% zero = NULL, effpos = .Machine$double.eps^0.75)
}
%- maybe also 'usage' for other objects documented here.
@@ -27,19 +26,20 @@ frechet2(location = 0, lscale = "loge", lshape = "logoff",
It is called \eqn{a} below.
}
- \item{lscale, lshape, escale, eshape}{
- Link functions and extra arguments for the parameters.
- See \code{\link{Links}} for more choices.
+ \item{lscale, lshape}{
+ Link functions for the parameters;
+ see \code{\link{Links}} for more choices.
}
\item{iscale, ishape, zero, nsimEIM}{
See \code{\link{CommonVGAMffArguments}} for information.
}
-% \item{edifference}{ %
+
+
+% \item{edifference}{ %
% Extra argument for the respective links.
% See \code{earg} in \code{\link{Links}} for general information.
-
% }
@@ -107,13 +107,16 @@ frechet2(location = 0, lscale = "loge", lshape = "logoff",
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
+
Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications
in Engineering and Science},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\section{Warning}{
@@ -125,6 +128,7 @@ Hoboken, NJ, USA: Wiley-Interscience.
Family function \code{frechet2} may fail for low values of
the shape parameter, e.g., near 2 or lower.
+
}
%\note{
@@ -148,12 +152,13 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{rfrechet}},
\code{\link{gev}}.
+
}
\examples{
set.seed(123)
-fdata = data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1)))
+fdata <- data.frame(y1 = rfrechet(nn <- 1000, shape = 2 + exp(1)))
\dontrun{ with(fdata, hist(y1)) }
-fit2 = vglm(y1 ~ 1, frechet2, fdata, trace = TRUE)
+fit2 <- vglm(y1 ~ 1, frechet2, fdata, trace = TRUE)
coef(fit2, matrix = TRUE)
Coef(fit2)
head(fitted(fit2))
diff --git a/man/freund61.Rd b/man/freund61.Rd
index c52266c..b6548fa 100644
--- a/man/freund61.Rd
+++ b/man/freund61.Rd
@@ -10,14 +10,13 @@
}
\usage{
freund61(la = "loge", lap = "loge", lb = "loge", lbp = "loge",
- ea = list(), eap = list(), eb = list(), ebp = list(),
ia = NULL, iap = NULL, ib = NULL, ibp = NULL,
independent = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{la,lap,lb,lbp,ea,eap,eb,ebp}{
- Link functions and extra arguments applied to the (positive)
+ \item{la, lap, lb, lbp}{
+ Link functions applied to the (positive)
parameters \eqn{\alpha}{alpha}, \eqn{\alpha'}{alpha'},
\eqn{\beta}{beta} and \eqn{\beta'}{beta'}, respectively
(the ``\code{p}'' stands for ``prime'').
@@ -25,7 +24,7 @@ freund61(la = "loge", lap = "loge", lb = "loge", lbp = "loge",
}
- \item{ia,iap,ib,ibp}{
+ \item{ia, iap, ib, ibp}{
Initial value for the four parameters respectively.
The default is to estimate them all internally.
diff --git a/man/fsqrt.Rd b/man/fsqrt.Rd
index b4117fe..1c2cc79 100644
--- a/man/fsqrt.Rd
+++ b/man/fsqrt.Rd
@@ -8,7 +8,7 @@
}
\usage{
-fsqrt(theta, earg = list(min = 0, max = 1, mux = sqrt(2)),
+fsqrt(theta, min = 0, max = 1, mux = sqrt(2),
inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,30 +18,16 @@ fsqrt(theta, earg = list(min = 0, max = 1, mux = sqrt(2)),
See below for further details.
}
- \item{earg}{
- List with components \code{min}, \code{max} and \code{mux}.
+ \item{min, max, mux}{
These are called \eqn{L}, \eqn{U} and \eqn{K} below.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
-
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
The folded square root link function can be applied to
@@ -50,9 +36,6 @@ fsqrt(theta, earg = list(min = 0, max = 1, mux = sqrt(2)),
out of range result in \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
-
}
\value{
@@ -102,7 +85,7 @@ fsqrt(p)
max(abs(fsqrt(fsqrt(p), inverse = TRUE) - p)) # Should be 0
p = c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-fsqrt(p) # Has NAs
+fsqrt(p) # Has NAs
\dontrun{
p = seq(0.01, 0.99, by = 0.01)
@@ -146,9 +129,7 @@ par(lwd = 1)
}
# This is lucky to converge
-earg = list(min = 0, max = 1, mux = 5)
-fit.h = vglm(agaaus ~ bs(altitude),
- fam = binomialff(link = "fsqrt", earg = earg),
+fit.h <- vglm(agaaus ~ bs(altitude), binomialff(link = fsqrt(mux = 5)),
data = hunua, trace = TRUE)
\dontrun{
plotvgam(fit.h, se = TRUE, lcol = "orange", scol = "orange",
@@ -158,11 +139,10 @@ head(predict(fit.h, hunua, type = "response"))
\dontrun{
# The following fails.
-pneumo = transform(pneumo, let = log(exposure.time))
-earg = list(min = 0, max = 1, mux = 10)
-fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(link = "fsqrt", earg = earg, par = TRUE, rev = TRUE),
- data = pneumo, trace = TRUE, maxit = 200) }
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(link = fsqrt(mux = 10), par = TRUE, rev = TRUE),
+ data = pneumo, trace = TRUE, maxit = 200) }
}
\keyword{math}
\keyword{models}
diff --git a/man/gamma1.Rd b/man/gamma1.Rd
index acd9333..48df8ff 100644
--- a/man/gamma1.Rd
+++ b/man/gamma1.Rd
@@ -8,18 +8,18 @@
}
\usage{
-gamma1(link = "loge", earg=list())
+gamma1(link = "loge", zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link}{
Link function applied to the (positive) \emph{shape} parameter.
- See \code{\link{Links}} for more choices.
+ See \code{\link{Links}} for more choices and general information.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
+ \item{zero}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
}
}
@@ -34,45 +34,55 @@ gamma1(link = "loge", earg=list())
is \eqn{\mu=shape}{mu=shape}, and the variance is
\eqn{\sigma^2 = shape}{sigma^2 = shape}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
Most standard texts on statistical distributions describe
the 1-parameter gamma distribution, e.g.,
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\note{
- This \pkg{VGAM} family function can handle a multivariate (matrix)
- response.
+ This \pkg{VGAM} family function can handle multiple
+ responses, which are inputted as a matrix.
+
The parameter \eqn{shape} matches with \code{shape} in
\code{\link[stats]{rgamma}}. The argument
\code{rate} in \code{\link[stats]{rgamma}} is assumed
1 for this family function.
+
If \eqn{rate} is unknown use the family function
\code{\link{gamma2.ab}} to estimate it too.
+
}
\seealso{
\code{\link{gamma2.ab}} for the 2-parameter gamma distribution,
- \code{\link{lgammaff}}.
+ \code{\link{lgammaff}},
+ \code{\link{lindley}}.
+
}
\examples{
-gdata = data.frame(y = rgamma(n=100, shape= exp(3)))
-fit = vglm(y ~ 1, gamma1, gdata, trace=TRUE, crit="c")
-coef(fit, matrix=TRUE)
+gdata <- data.frame(y = rgamma(n = 100, shape = exp(3)))
+fit <- vglm(y ~ 1, gamma1, gdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
diff --git a/man/gamma2.Rd b/man/gamma2.Rd
index 877af3f..6f1ce29 100644
--- a/man/gamma2.Rd
+++ b/man/gamma2.Rd
@@ -8,8 +8,10 @@
}
\usage{
-gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
- imethod = 1, deviance.arg = FALSE, ishape = NULL, zero = -2)
+gamma2(lmu = "loge", lshape = "loge",
+ imethod = 1, ishape = NULL,
+ parallel = FALSE, intercept.apply = FALSE,
+ deviance.arg = FALSE, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,10 +20,6 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
parameters (called \eqn{\mu}{mu} and \eqn{\lambda}{shape} respectively).
See \code{\link{Links}} for more choices.
- }
- \item{emu, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{ishape}{
@@ -31,6 +29,7 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
This argument is ignored if used within \code{\link{cqo}}; see the
\code{iShape} argument of \code{\link{qrrvglm.control}} instead.
+
}
\item{imethod}{
An integer with value \code{1} or \code{2} which
@@ -38,6 +37,7 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
If failure to converge occurs
try another value (and/or specify a value for \code{ishape}).
+
}
\item{deviance.arg}{
Logical. If \code{TRUE}, the deviance function
@@ -48,6 +48,7 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
It should be set \code{TRUE} only when used with \code{\link{cqo}}
under the fast algorithm.
+
}
\item{zero}{
% An integer specifying which
@@ -66,6 +67,12 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
all shape parameters are intercept only.
See \code{\link{CommonVGAMffArguments}} for more information.
+
+ }
+ \item{parallel, intercept.apply}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
}
}
\details{
@@ -118,9 +125,11 @@ gamma2(lmu = "loge", lshape = "loge", emu = list(), eshape = list(),
The parameterization of this \pkg{VGAM} family function is the
2-parameter gamma distribution described in the monograph
+
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
}
\author{ T. W. Yee }
\note{
@@ -128,15 +137,18 @@ McCullagh, P. and Nelder, J. A. (1989)
A moment estimator for the shape parameter may be implemented in
the future.
- If \code{mu} and \code{shape} are vectors, then \code{rgamma(n=n,
- shape=shape, scale=mu/shape)} will generate random gamma variates of this
+
+ If \code{mu} and \code{shape} are vectors, then \code{rgamma(n = n,
+ shape = shape, scale = mu/shape)} will generate random gamma variates of this
parameterization, etc.;
see \code{\link[stats]{GammaDist}}.
+
For \code{\link{cqo}} and \code{\link{cao}}, taking the logarithm
of the response means (approximately) a \code{\link{gaussianff}} family
may be used on the transformed data.
+
}
\seealso{
@@ -149,6 +161,7 @@ McCullagh, P. and Nelder, J. A. (1989)
\code{\link{golf}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
# Essentially a 1-parameter gamma
diff --git a/man/gamma2.ab.Rd b/man/gamma2.ab.Rd
index b1e7957..22bd6fd 100644
--- a/man/gamma2.ab.Rd
+++ b/man/gamma2.ab.Rd
@@ -7,7 +7,6 @@
}
\usage{
gamma2.ab(lrate = "loge", lshape = "loge",
- erate = list(), eshape = list(),
irate = NULL, ishape = NULL, expected = TRUE, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,11 +17,6 @@ gamma2.ab(lrate = "loge", lshape = "loge",
See \code{\link{Links}} for more choices.
}
- \item{erate, eshape}{
- List. Extra arguments for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{expected}{
Logical. Use Fisher scoring? The default is yes, otherwise
Newton-Raphson is used.
diff --git a/man/gammahyp.Rd b/man/gammahyp.Rd
index 3fa60fd..8c373e7 100644
--- a/man/gammahyp.Rd
+++ b/man/gammahyp.Rd
@@ -7,7 +7,7 @@
by maximum likelihood estimation.
}
\usage{
-gammahyp(ltheta="loge", itheta=NULL, expected=FALSE)
+gammahyp(ltheta = "loge", itheta = NULL, expected = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -69,13 +69,13 @@ Asymptotics and the theory of inference.
\code{\link{exponential}}.
}
\examples{
-gdata = data.frame(x = runif(nn <- 1000))
-gdata = transform(gdata, theta = exp(-2+x))
-gdata = transform(gdata, y1 = rexp(nn, rate=exp(-theta)/theta),
- y2 = rexp(nn, rate=theta) + 1)
-fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp(expected=TRUE), gdata)
-fit = vglm(cbind(y1,y2) ~ x, fam=gammahyp, gdata, trace=TRUE, crit="coef")
-coef(fit, matrix=TRUE)
+gdata <- data.frame(x = runif(nn <- 1000))
+gdata <- transform(gdata, theta = exp(-2+x))
+gdata <- transform(gdata, y1 = rexp(nn, rate = exp(-theta)/theta),
+ y2 = rexp(nn, rate = theta) + 1)
+fit <- vglm(cbind(y1,y2) ~ x, fam = gammahyp(expected = TRUE), gdata)
+fit <- vglm(cbind(y1,y2) ~ x, fam = gammahyp, gdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
summary(fit)
diff --git a/man/garma.Rd b/man/garma.Rd
index 6c68f30..40e4ebc 100644
--- a/man/garma.Rd
+++ b/man/garma.Rd
@@ -7,7 +7,7 @@
}
\usage{
-garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
+garma(link = "identity", p.ar.lag = 1, q.ma.lag = 0,
coefstart = NULL, step = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -16,20 +16,16 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
Link function applied to the mean response.
The default is suitable for continuous responses.
The link \code{\link{loge}} should be chosen if the data are counts.
- Links such as \code{\link{logit}}, \code{\link{probit}},
- \code{\link{cloglog}},
- \code{\link{cauchit}} are suitable for binary responses.
+ The link \code{\link{reciprocal}} can be chosen if the data are counts
+ and the variance assumed for this is \eqn{\mu^2}{mu^2}.
+ The links \code{\link{logit}}, \code{\link{probit}},
+ \code{\link{cloglog}}, and
+ \code{\link{cauchit}} are supported and suitable for binary responses.
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
- In particular, this argument is useful
- when the log or logit link is chosen:
+ Note that when the log or logit link is chosen:
for log and logit,
- zero values can be replaced by \code{bvalue} which
- is inputted as \code{earg=list(bvalue = bvalue)}.
+ zero values can be replaced by \code{bvalue}.
See \code{\link{loge}} and \code{\link{logit}} etc. for specific
information about each link function.
@@ -51,6 +47,7 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
}
\item{coefstart}{
Starting values for the coefficients.
+ Assigning this argument is highly recommended.
For technical reasons, the
argument \code{coefstart} in \code{\link{vglm}} cannot be used.
@@ -74,10 +71,9 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
See also Benjamin \emph{et al.} (2003).
GARMA models extend the ARMA time series model to generalized
responses in the exponential family, e.g., Poisson counts,
- binary responses. Currently, this function can handle continuous,
- count and binary responses only. The possible link functions
- given in the \code{link} argument reflect this, and the user
- must choose an appropriate link.
+ binary responses. Currently, this function is rudimentary and
+ can handle only certain continuous, count and binary responses.
+ The user must choose an appropriate link for the \code{link} argument.
The GARMA(\eqn{p, q}) model is defined by firstly
@@ -144,9 +140,9 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
\author{ T. W. Yee }
\note{
- This function is unpolished and is requires lots
- of improvements. In particular, initialization is quite poor,
- and ought to be improved.
+ This function is unpolished and requires \emph{lots} of improvements.
+ In particular, initialization is \emph{very poor}.
+ Results appear \emph{very} sensitive to quality of initial values.
A limited amount of experience has shown that half-stepsizing is
often needed for convergence, therefore choosing \code{crit = "coef"}
is not recommended.
@@ -173,6 +169,7 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
% \code{\link{identity}},
% \code{\link{logit}}.
+
The site \url{http://www.stat.auckland.ac.nz/~yee} contains
more documentation about this family function.
@@ -180,7 +177,7 @@ garma(link = "identity", earg=list(), p.ar.lag = 1, q.ma.lag = 0,
}
\examples{
-gdata = data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
+gdata <- data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
59, 73, 6, 44, 72, 66, 59, 60, 39, 52,
50, 29, 30, 56, 76, 55, 73, 104, 104, 52,
25, 33, 20, 60, 47, 6, 47, 22, 35, 30,
@@ -190,14 +187,14 @@ gdata = data.frame(interspike = c(68, 41, 82, 66, 101, 66, 57, 41, 27, 78,
19, 18, 14, 23, 18, 22, 18, 19, 26, 27,
23, 24, 35, 22, 29, 28, 17, 30, 34, 17,
20, 49, 29, 35, 49, 25, 55, 42, 29, 16)) # See Zeger and Qaqish (1988)
-gdata = transform(gdata, spikenum = seq(interspike))
-bvalue = 0.1 # .Machine$double.xmin # Boundary value
-fit = vglm(interspike ~ 1, trace = TRUE, data = gdata,
- garma("loge", earg = list(bvalue = bvalue),
- p = 2, coef = c(4, 0.3, 0.4)))
+gdata <- transform(gdata, spikenum = seq(interspike))
+bvalue <- 0.1 # .Machine$double.xmin # Boundary value
+fit <- vglm(interspike ~ 1, trace = TRUE, data = gdata,
+ garma(loge(bvalue = bvalue),
+ p = 2, coefstart = c(4, 0.3, 0.4)))
summary(fit)
coef(fit, matrix = TRUE)
-Coef(fit) # A bug here
+Coef(fit) # A bug here
\dontrun{ with(gdata, plot(interspike, ylim = c(0, 120), las = 1,
xlab = "Spike Number", ylab = "Inter-Spike Time (ms)", col = "blue"))
with(gdata, lines(spikenum[-(1:fit at misc$plag)], fitted(fit), col = "orange"))
diff --git a/man/gaussianff.Rd b/man/gaussianff.Rd
index 3ad204b..fb471d6 100644
--- a/man/gaussianff.Rd
+++ b/man/gaussianff.Rd
@@ -122,24 +122,24 @@ gaussianff(dispersion = 0, parallel = FALSE, zero = NULL)
}
\examples{
-mydat = data.frame(x = sort(runif(n <- 40)))
-mydat = transform(mydat, y1 = 1 + 2*x + rnorm(n, sd=0.1),
- y2 = 3 + 4*x + rnorm(n, sd=0.1),
- y3 = 7 + 4*x + rnorm(n, sd=0.1))
-fit = vglm(cbind(y1,y2) ~ x, gaussianff, data=mydat)
-coef(fit, matrix=TRUE)
+gdata <- data.frame(x2 = sort(runif(n <- 40)))
+gdata <- transform(gdata, y1 = 1 + 2*x2 + rnorm(n, sd = 0.1),
+ y2 = 3 + 4*x2 + rnorm(n, sd = 0.1),
+ y3 = 7 + 4*x2 + rnorm(n, sd = 0.1))
+fit <- vglm(cbind(y1,y2) ~ x2, gaussianff, data = gdata)
+coef(fit, matrix = TRUE)
# For comparison:
-coef( lmfit <- lm(y1 ~ x, data=mydat))
-coef(glmfit <- glm(y2 ~ x, data=mydat, gaussian))
+coef( lmfit <- lm(y1 ~ x2, data = gdata))
+coef(glmfit <- glm(y2 ~ x2, data = gdata, gaussian))
vcov(fit)
vcov(lmfit)
-t(weights(fit, type="prior")) # Unweighted observations
-head(weights(fit, type="working")) # Identity matrices
+t(weights(fit, type = "prior")) # Unweighted observations
+head(weights(fit, type = "working")) # Identity matrices
# Reduced-rank VLM (rank-1)
-fit2 = rrvglm(cbind(y1,y2,y3) ~ x, gaussianff, data=mydat)
+fit2 <- rrvglm(cbind(y1, y2, y3) ~ x2, gaussianff, data = gdata)
Coef(fit2)
}
\keyword{models}
diff --git a/man/genbetaII.Rd b/man/genbetaII.Rd
index 54b24d2..2fd8549 100644
--- a/man/genbetaII.Rd
+++ b/man/genbetaII.Rd
@@ -9,7 +9,6 @@
}
\usage{
genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "loge",
- eshape1.a = list(), escale = list(), eshape2.p = list(), eshape3.q = list(),
ishape1.a = NULL, iscale = NULL, ishape2.p = 1, ishape3.q = 1,
zero = NULL)
}
@@ -26,12 +25,6 @@ genbetaII(lshape1.a = "loge", lscale = "loge", lshape2.p = "loge", lshape3.q = "
}
- \item{eshape1.a, escale, eshape2.p, eshape3.q}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
-
- }
\item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
A \code{NULL} means a value is computed internally.
@@ -139,10 +132,10 @@ More improvements could be made here.
}
\examples{
-gdata = data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1))) # A special case!
-fit = vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
-fit = vglm(y ~ 1, data = gdata, trace = TRUE,
- genbetaII(ishape1.a = 4, ishape2.p = 2.2, iscale = 7, ishape3.q = 2.3))
+gdata <- data.frame(y = rsinmad(3000, exp(2), exp(2), exp(1))) # A special case!
+fit <- vglm(y ~ 1, genbetaII, gdata, trace = TRUE)
+fit <- vglm(y ~ 1, data = gdata, trace = TRUE,
+ genbetaII(ishape1.a = 4, ishape2.p = 2.2, iscale = 7, ishape3.q = 2.3))
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/gengamma.Rd b/man/gengamma.Rd
index a0405f1..0742075 100644
--- a/man/gengamma.Rd
+++ b/man/gengamma.Rd
@@ -8,8 +8,7 @@
}
\usage{
-gengamma(lscale = "loge", ld = "loge", lk = "loge",
- escale = list(), ed = list(), ek = list(),
+gengamma(lscale = "loge", ld = "loge", lk = "loge",
iscale = NULL, id = NULL, ik = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,11 +19,6 @@ gengamma(lscale = "loge", ld = "loge", lk = "loge",
See \code{\link{Links}} for more choices.
}
- \item{escale, ed, ek}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, id, ik}{
Initial value for \eqn{b}, \eqn{d} and \eqn{k}, respectively.
The defaults mean an initial value is determined internally for each.
@@ -45,7 +39,10 @@ gengamma(lscale = "loge", ld = "loge", lk = "loge",
f(y;b,d,k) = d * b^(-d*k) * y^(d*k-1) * exp(-(y/b)^d) / gamma(k)}
for scale parameter \eqn{b > 0}, and \eqn{d > 0}, \eqn{k > 0},
and \eqn{y > 0}.
- The mean of \eqn{Y} is \eqn{bk}{b*k} (returned as the fitted values).
+ The mean of \eqn{Y}
+ is \eqn{b \times \Gamma(k+1/d) / \Gamma(k)}{b*gamma(k+1/d)/gamma(k)}
+ (returned as the fitted values),
+ which equals \eqn{bk}{b*k} if \eqn{d=1}.
There are many special cases, as given in Table 1 of Stacey and Mihram (1965).
@@ -73,7 +70,7 @@ Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
\references{
Stacy, E. W. (1962)
A generalization of the gamma distribution.
- \emph{Annals of Mathematical Statistics}, \bold{33}, 1187--1192.
+ \emph{Annals of Mathematical Statistics}, \bold{33}(3), 1187--1192.
Stacy, E. W. and Mihram, G. A. (1965)
@@ -119,21 +116,24 @@ Rayleigh \eqn{f(y;c\sqrt{2},2,1)}{f(y;c sqrt(2),2,1)} where \eqn{c>0}.
\code{\link{gamma2}},
\code{\link{prentice74}}.
+
}
\examples{
-k = exp(-1); Scale = exp(1)
-gdata = data.frame(y = rgamma(1000, shape = k, scale = Scale))
-fit = vglm(y ~ 1, gengamma, gdata, trace = TRUE)
+\dontrun{ k <- exp(-1); Scale = exp(1)
+gdata <- data.frame(y = rgamma(1000, shape = k, scale = Scale))
+fit <- vglm(y ~ 1, gengamma, gdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Another example
-gdata = data.frame(x = runif(nn <- 5000))
-gdata = transform(gdata, Scale = exp(1), d = exp(0 + 1.2*x),
- k = exp(-1 + 2*x))
-gdata = transform(gdata, y = rgengamma(nn, scale = Scale, d = d, k = k))
-fit = vglm(y ~ x, gengamma(zero = 1, iscale = 6), gdata, trace = TRUE)
-fit = vglm(y ~ x, gengamma(zero = 1), gdata, trace = TRUE, maxit = 50)
+gdata <- data.frame(x2 = runif(nn <- 5000))
+gdata <- transform(gdata, Scale = exp(1),
+ d = exp( 0 + 1.2* x2),
+ k = exp(-1 + 2 * x2))
+gdata <- transform(gdata, y = rgengamma(nn, scale = Scale, d = d, k = k))
+fit <- vglm(y ~ x2, gengamma(zero = 1, iscale = 6), gdata, trace = TRUE)
+fit <- vglm(y ~ x2, gengamma(zero = 1), gdata, trace = TRUE, maxit = 50)
coef(fit, matrix = TRUE)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/genpoisson.Rd b/man/genpoisson.Rd
index 6bc9335..0e2752c 100644
--- a/man/genpoisson.Rd
+++ b/man/genpoisson.Rd
@@ -6,10 +6,8 @@
Estimation of the two parameters of a generalized Poisson distribution.
}
\usage{
-genpoisson(llambda = "elogit", ltheta = "loge",
- elambda = if (llambda == "elogit") list(min = -1, max = 1)
- else list(),
- etheta = list(), ilambda = NULL, itheta = NULL,
+genpoisson(llambda = elogit(min = -1, max = 1), ltheta = "loge",
+ ilambda = NULL, itheta = NULL,
use.approx = TRUE, imethod = 1, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -23,11 +21,6 @@ genpoisson(llambda = "elogit", ltheta = "loge",
log link.
}
- \item{elambda, etheta}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilambda, itheta}{
Optional initial values for \eqn{\lambda} and \eqn{\theta}.
The default is to choose values internally.
@@ -89,6 +82,7 @@ and the variance is \eqn{\theta / (1 - \lambda)^3}.
}
\references{
+
Consul, P. C. and Famoye, F. (2006)
\emph{Lagrangian Probability Distributions},
Boston: Birkhauser.
@@ -111,15 +105,17 @@ New York: Marcel Dekker.
Convergence problems may occur when \code{lambda} is very close
to 0 or 1.
+
}
\seealso{
\code{\link{poissonff}}.
+
}
\examples{
-gdata = data.frame(x2 = runif(nn <- 200))
-gdata = transform(gdata, y = rpois(nn, exp(2 - x2))) # Ordinary Poisson data
-fit = vglm(y ~ x2, genpoisson(zero = 1), gdata, trace = TRUE)
+gdata <- data.frame(x2 = runif(nn <- 200))
+gdata <- transform(gdata, y = rpois(nn, exp(2 - x2))) # Ordinary Poisson data
+fit <- vglm(y ~ x2, genpoisson(zero = 1), gdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/genrayleigh.Rd b/man/genrayleigh.Rd
index 1fd4484..a5b72c9 100644
--- a/man/genrayleigh.Rd
+++ b/man/genrayleigh.Rd
@@ -8,8 +8,8 @@
}
\usage{
-genrayleigh(lshape = "loge", lscale = "loge", eshape = list(),
- escale = list(), ishape = NULL, iscale = NULL,
+genrayleigh(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
tol12 = 1e-05, nsimEIM = 300, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +19,6 @@ genrayleigh(lshape = "loge", lscale = "loge", eshape = list(),
See \code{\link{Links}} for more choices.
}
- \item{eshape, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape, iscale}{
Numeric.
Optional initial values for the shape and scale parameters.
diff --git a/man/geometric.Rd b/man/geometric.Rd
index 25d1fcb..21a5865 100644
--- a/man/geometric.Rd
+++ b/man/geometric.Rd
@@ -6,16 +6,16 @@
Maximum likelihood estimation for the geometric distribution.
}
\usage{
-geometric(link = "logit", earg = list(), expected = TRUE, imethod = 1,
- iprob = NULL)
+geometric(link = "logit", expected = TRUE, imethod = 1,
+ iprob = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Parameter link function and extra argument applied to the
+ \item{link}{
+ Parameter link function applied to the
parameter \eqn{p}{prob}, which lies in the unit interval.
- See \code{\link{Links}} for more choices,
- and \code{earg} in \code{\link{Links}} for general information.
+ See \code{\link{Links}} for more choices.
+
}
\item{expected}{
@@ -29,8 +29,7 @@ geometric(link = "logit", earg = list(), expected = TRUE, imethod = 1,
If failure to converge occurs try another value.
}
- \item{iprob}{
- Optional initial value.
+ \item{iprob, zero}{
See \code{\link{CommonVGAMffArguments}} for more details.
}
@@ -52,6 +51,9 @@ geometric(link = "logit", earg = list(), expected = TRUE, imethod = 1,
\eqn{Y+1} has a positive-geometric distribution with the same parameter.
+ Multiple responses are permitted.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -85,14 +87,14 @@ geometric(link = "logit", earg = list(), expected = TRUE, imethod = 1,
}
\examples{
-gdata = data.frame(x2 = runif(nn <- 1000) - 0.5)
-gdata = transform(gdata, x3 = runif(nn) - 0.5,
- x4 = runif(nn) - 0.5)
-gdata = transform(gdata, eta = 1.0 - 1.0 * x2 + 2.0 * x3)
-gdata = transform(gdata, prob = logit(eta, inverse = TRUE))
-gdata = transform(gdata, y = rgeom(nn, prob))
-with(gdata, table(y))
-fit = vglm(y ~ x2 + x3 + x4, geometric, gdata, trace = TRUE)
+gdata <- data.frame(x2 = runif(nn <- 1000) - 0.5)
+gdata <- transform(gdata, x3 = runif(nn) - 0.5,
+ x4 = runif(nn) - 0.5)
+gdata <- transform(gdata, eta = 1.0 - 1.0 * x2 + 2.0 * x3)
+gdata <- transform(gdata, prob = logit(eta, inverse = TRUE))
+gdata <- transform(gdata, y1 = rgeom(nn, prob))
+with(gdata, table(y1))
+fit <- vglm(y1 ~ x2 + x3 + x4, geometric, gdata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/gev.Rd b/man/gev.Rd
index d24a512..f16d75f 100644
--- a/man/gev.Rd
+++ b/man/gev.Rd
@@ -9,19 +9,13 @@
}
\usage{
-gev(llocation = "identity", lscale = "loge", lshape = "logoff",
- elocation = list(), escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+gev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
- imethod = 1, gshape=c(-0.45, 0.45), tolshape0 = 0.001,
+ imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
giveWarning = TRUE, zero = 3)
-egev(llocation = "identity", lscale = "loge", lshape = "logoff",
- elocation = list(), escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else list(),
+egev(llocation = "identity", lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(95, 99), iscale=NULL, ishape = NULL,
- imethod = 1, gshape=c(-0.45, 0.45), tolshape0 = 0.001,
+ imethod = 1, gshape = c(-0.45, 0.45), tolshape0 = 0.001,
giveWarning = TRUE, zero = 3)
}
%- maybe also 'usage' for other objects documented here.
@@ -31,20 +25,20 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
\eqn{\xi}{xi} respectively.
See \code{\link{Links}} for more choices.
- }
- \item{elocation, escale, eshape}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
+
For the shape parameter,
- if the \code{\link{logoff}} link is chosen then the offset is
+ the default \code{\link{logoff}} link has an offset
called \eqn{A} below; and then the linear/additive predictor is
\eqn{\log(\xi+A)}{log(xi+A)} which means that
\eqn{\xi > -A}{xi > -A}.
For technical reasons (see \bold{Details}) it is a good idea
for \eqn{A = 0.5}.
+
}
+
+
% \item{Offset}{
% Numeric, of length 1.
% Called \eqn{A} below.
@@ -56,10 +50,12 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
% \code{Offset = 0.5}.
% }
+
+
\item{percentiles}{
Numeric vector of percentiles used
for the fitted values. Values should be between 0 and 100.
- However, if \code{percentiles=NULL}, then the mean
+ However, if \code{percentiles = NULL}, then the mean
\eqn{\mu + \sigma (\Gamma(1-\xi)-1) / \xi}{mu + sigma * (gamma(1-xi)-1)/xi}
is returned, and this is only defined if \eqn{\xi<1}{xi<1}.
@@ -82,15 +78,17 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
% Numeric, of length 2.
% Range of \eqn{\xi}{xi} if \code{lshape = "elogit"} is chosen.
% The rationale for the default values is given below.
-
% }
+
+
% \item{mean}{
% Logical. If \code{TRUE}, the mean is computed and returned
% as the fitted values. This argument overrides the
% \code{percentiles} argument.
% See \bold{Details} for more details.
-
% }
+
+
\item{imethod}{
Initialization method. Either the value 1 or 2.
 Method 1 involves choosing the best \eqn{\xi}{xi} on a coarse grid with
@@ -118,7 +116,7 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
linear/additive predictors are modelled as intercepts only.
The values must be from the set \{1,2,3\} corresponding
respectively to \eqn{\mu}{mu}, \eqn{\sigma}{sigma}, \eqn{\xi}{xi}.
- If \code{zero=NULL} then all linear/additive predictors are modelled as
+ If \code{zero = NULL} then all linear/additive predictors are modelled as
a linear combination of the explanatory variables.
For many data sets having \code{zero = 3} is a good idea.
@@ -159,8 +157,8 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
Smith (1985) established that when \eqn{\xi > -0.5}{xi > -0.5},
the maximum likelihood estimators are completely regular.
To have some control over the estimated \eqn{\xi}{xi} try
- using \code{lshape = "logoff"} and the \code{eshape=list(offset = 0.5)}, say,
- or \code{lshape = "elogit"} and \code{eshape=list(min = -0.5, max = 0.5)}, say.
+ using \code{lshape = logoff(offset = 0.5)}, say,
+ or \code{lshape = elogit(min = -0.5, max = 0.5)}, say.
% and when \eqn{-1 < \xi < -0.5}{-1 < xi < -0.5} they exist but are
@@ -193,6 +191,7 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Yee, T. W. and Stephenson, A. G. (2007)
@@ -256,6 +255,7 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
\code{\link{guplot}},
\code{\link{rlplot.egev}},
\code{\link{gpd}},
+ \code{\link{weibull}},
\code{\link{frechet2}},
\code{\link{elogit}},
\code{\link{oxtemp}},
@@ -266,22 +266,21 @@ egev(llocation = "identity", lscale = "loge", lshape = "logoff",
\examples{
# Multivariate example
-fit1 = vgam(cbind(r1, r2) ~ s(year, df = 3), gev(zero = 2:3),
- venice, trace = TRUE)
+fit1 <- vgam(cbind(r1, r2) ~ s(year, df = 3), gev(zero = 2:3),
+ venice, trace = TRUE)
coef(fit1, matrix = TRUE)
head(fitted(fit1))
-\dontrun{
-par(mfrow=c(1,2), las = 1)
+\dontrun{ par(mfrow = c(1, 2), las = 1)
plot(fit1, se = TRUE, lcol = "blue", scol = "forestgreen",
main = "Fitted mu(year) function (centered)", cex.main = 0.8)
-with(venice, matplot(year, y[,1:2], ylab = "Sea level (cm)", col = 1:2,
- main = "Highest 2 annual sea levels", cex.main = 0.8))
+with(venice, matplot(year, depvar(fit1)[, 1:2], ylab = "Sea level (cm)",
+ col = 1:2, main = "Highest 2 annual sea levels", cex.main = 0.8))
with(venice, lines(year, fitted(fit1)[,1], lty = "dashed", col = "blue"))
legend("topleft", lty = "dashed", col = "blue", "Fitted 95 percentile") }
# Univariate example
-(fit = vglm(maxtemp ~ 1, egev, oxtemp, trace = TRUE))
+(fit <- vglm(maxtemp ~ 1, egev, oxtemp, trace = TRUE))
head(fitted(fit))
coef(fit, matrix = TRUE)
Coef(fit)
diff --git a/man/gevUC.Rd b/man/gevUC.Rd
index 7cfef1c..3475388 100644
--- a/man/gevUC.Rd
+++ b/man/gevUC.Rd
@@ -11,6 +11,8 @@
location parameter \code{location},
scale parameter \code{scale} and
shape parameter \code{shape}.
+
+
}
\usage{
dgev(x, location = 0, scale = 1, shape = 0, log = FALSE, tolshape0 =
@@ -100,8 +102,8 @@ London: Springer-Verlag.
}
\examples{
\dontrun{
-x = seq(-3, 3, by = 0.01)
-loc = 0; sigma = 1; xi = -0.4
+x <- seq(-3, 3, by = 0.01)
+loc <- 0; sigma <- 1; xi <- -0.4
plot(x, dgev(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0,1),
main = "Blue is density, red is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
diff --git a/man/golf.Rd b/man/golf.Rd
index 2071f3e..b8c56ac 100644
--- a/man/golf.Rd
+++ b/man/golf.Rd
@@ -6,10 +6,11 @@
Computes the gamma-ordinal transformation, including its inverse
and the first two derivatives.
+
}
\usage{
-golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+golf(theta, lambda = 1, cutpoint = NULL,
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,13 +18,11 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
Numeric or character.
See below for further details.
+
}
- \item{earg}{
- Extra argument for passing in additional information.
- This must be list with component \code{lambda}.
- Here, \code{lambda} is the shape parameter
- in \code{\link{gamma2}}.
- A component in the list called \code{cutpoint} is optional; if omitted
+ \item{lambda, cutpoint}{
+ The former is the shape parameter in \code{\link{gamma2}}.
+ \code{cutpoint} is optional; if \code{NULL}
then \code{cutpoint} is ignored from the GOLF definition.
If given, the cutpoints should be non-negative integers.
If \code{golf()} is used as the link function in
@@ -33,26 +32,17 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
If the cutpoints are unknown, then choose
\code{reverse = TRUE, parallel = TRUE, intercept.apply = FALSE}.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
}
+
+
}
\details{
The gamma-ordinal link function (GOLF) can be applied to a
@@ -62,9 +52,6 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
distribution.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
-
See \code{\link{Links}} for general information about \pkg{VGAM}
link functions.
@@ -117,48 +104,45 @@ golf(theta, earg = list(lambda = 1), inverse = FALSE, deriv = 0,
}
\examples{
-earg = list(lambda = 1)
-golf("p", earg = earg, short = FALSE)
-golf("p", earg = earg, tag = TRUE)
+golf("p", lambda = 1, short = FALSE)
+golf("p", lambda = 1, tag = TRUE)
-p = seq(0.02, 0.98, len = 201)
-y = golf(p, earg = earg)
-y. = golf(p, earg = earg, deriv = 1)
-max(abs(golf(y, earg = earg, inv = TRUE) - p)) # Should be 0
+p <- seq(0.02, 0.98, len = 201)
+y <- golf(p, lambda = 1)
+y. <- golf(p, lambda = 1, deriv = 1)
+max(abs(golf(y, lambda = 1, inv = TRUE) - p)) # Should be 0
-\dontrun{
-par(mfrow=c(2,1), las = 1)
+\dontrun{par(mfrow = c(2, 1), las = 1)
plot(p, y, type = "l", col = "blue", main = "golf()")
-abline(h=0, v=0.5, col = "red", lty = "dashed")
-
+abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
plot(p, y., type = "l", col = "blue",
main = "(Reciprocal of) first GOLF derivative")
}
-
# Another example
-gdata = data.frame(x2 = sort(runif(nn <- 1000)))
-gdata = transform(gdata, x3 = runif(nn))
-gdata = transform(gdata, mymu = exp( 3 + 1 * x2 - 2 * x3))
-lambda = 4
-gdata = transform(gdata, y1 = rgamma(nn, shape=lambda, scale=mymu/lambda))
-cutpoints = c(-Inf, 10, 20, Inf)
-gdata = transform(gdata, cuty = Cut(y1, breaks=cutpoints))
-\dontrun{
-par(mfrow=c(1,1), las = 1)
-with(gdata, plot(x2, x3, col=cuty, pch=as.character(cuty))) }
+gdata <- data.frame(x2 = sort(runif(nn <- 1000)))
+gdata <- transform(gdata, x3 = runif(nn))
+gdata <- transform(gdata, mymu = exp( 3 + 1 * x2 - 2 * x3))
+lambda <- 4
+gdata <- transform(gdata,
+ y1 = rgamma(nn, shape = lambda, scale = mymu / lambda))
+cutpoints <- c(-Inf, 10, 20, Inf)
+gdata <- transform(gdata, cuty = Cut(y1, breaks = cutpoints))
+
+\dontrun{ par(mfrow = c(1, 1), las = 1)
+with(gdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
with(gdata, table(cuty) / sum(table(cuty)))
-fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "golf",
+fit <- vglm(cuty ~ x2 + x3, cumulative(mv = TRUE,
reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
- mv = TRUE, earg = list(cutpoint=cutpoints[2:3], lambda=lambda)),
- gdata, trace = TRUE)
-head(fit at y)
+ link = golf(cutpoint = cutpoints[2:3], lambda = lambda)),
+ data = gdata, trace = TRUE)
+head(depvar(fit))
head(fitted(fit))
head(predict(fit))
coef(fit)
coef(fit, matrix = TRUE)
constraints(fit)
-fit at misc$earg
+fit at misc
}
\keyword{math}
\keyword{models}
diff --git a/man/gompertz.Rd b/man/gompertz.Rd
new file mode 100644
index 0000000..cd63b78
--- /dev/null
+++ b/man/gompertz.Rd
@@ -0,0 +1,133 @@
+\name{gompertz}
+\alias{gompertz}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gompertz Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ Gompertz distribution.
+
+}
+\usage{
+gompertz(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ nsimEIM = 500, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link functions applied to the
+ shape parameter \code{a},
+ scale parameter \code{scale}.
+ All parameters are positive.
+ See \code{\link{Links}} for more choices.
+
+
+ }
+
+% \item{eshape, escale}{
+% List. Extra argument for each of the links.
+% eshape = list(), escale = list(),
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+ \item{ishape, iscale}{
+ Optional initial values.
+ A \code{NULL} means a value is computed internally.
+
+
+ }
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+ }
+}
+\details{
+The Gompertz distribution has a cumulative distribution function
+ \deqn{F(x;\alpha, \beta) = 1 - \exp[-(\alpha/\beta) \times (\exp(\beta x) - 1) ]}{%
+ F(x;alpha, beta) = 1 - exp(-(alpha/beta) * (exp(beta * x) - 1) )}
+which leads to a probability density function
+ \deqn{f(x; \alpha, \beta) = \alpha \exp(\beta x)
+ \exp [-(\alpha/\beta) \times (\exp(\beta x) - 1) ]}{%
+ f(x; alpha, beta) = alpha * exp(beta * x) * exp[-(alpha/beta) * (exp(beta * x) - 1) ]}
+ for \eqn{\alpha > 0}{a > 0},
+ \eqn{\beta > 0}{b > 0},
+ \eqn{x > 0}.
+Here, \eqn{\beta} is called the scale parameter \code{scale},
+and \eqn{\alpha} is called the shape parameter
+(one could refer to \eqn{\alpha}{a} as a location parameter and \eqn{\beta}{b} as
+a shape parameter---see Lenart (2012)).
+The mean involves an exponential integral function.
+Simulated Fisher scoring is used and multiple responses are handled.
+
+
+The Makeham distribution has an additional parameter compared to
+the Gompertz distribution.
+If \eqn{X} is defined to be the result of sampling from a Gumbel
+distribution until a negative value \eqn{Z} is produced,
+then \eqn{X = -Z} has a Gompertz distribution.
+
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+Lenart, A. (2012)
+The moments of the Gompertz distribution
+and maximum likelihood estimation of its parameters.
+\emph{Scandinavian Actuarial Journal}, in press.
+
+
+}
+
+\author{ T. W. Yee }
+\section{Warning }{
+The same warnings in \code{\link{makeham}} apply here too.
+
+
+}
+
+\seealso{
+ \code{\link{dgompertz}},
+ \code{\link{makeham}}.
+
+
+}
+
+\examples{
+\dontrun{
+gdata <- data.frame(x2 = runif(nn <- 1000))
+gdata <- transform(gdata, eta1 = -1,
+ eta2 = -1 + 0.2 * x2,
+ ceta1 = 1,
+ ceta2 = -1 + 0.2 * x2)
+gdata <- transform(gdata, shape1 = exp(eta1),
+ shape2 = exp(eta2),
+ scale1 = exp(ceta1),
+ scale2 = exp(ceta2))
+gdata <- transform(gdata, y1 = rgompertz(nn, shape = shape1, scale = scale1),
+ y2 = rgompertz(nn, shape = shape2, scale = scale2))
+
+fit1 <- vglm(y1 ~ 1, gompertz, data = gdata, trace = TRUE)
+fit2 <- vglm(y2 ~ x2, gompertz, data = gdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+Coef(fit1)
+summary(fit1)
+coef(fit2, matrix = TRUE)
+summary(fit2)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+% probs.y = c(0.20, 0.50, 0.80)
+
+
+
+
diff --git a/man/gompertzUC.Rd b/man/gompertzUC.Rd
new file mode 100644
index 0000000..9c793b6
--- /dev/null
+++ b/man/gompertzUC.Rd
@@ -0,0 +1,78 @@
+\name{Gompertz}
+\alias{Gompertz}
+\alias{dgompertz}
+\alias{pgompertz}
+\alias{qgompertz}
+\alias{rgompertz}
+\title{The Gompertz Distribution}
+\description{
+ Density, cumulative distribution function,
+ quantile function
+ and
+ random generation for
+ the Gompertz distribution.
+
+}
+\usage{
+dgompertz(x, shape, scale = 1, log = FALSE)
+pgompertz(q, shape, scale = 1)
+qgompertz(p, shape, scale = 1)
+rgompertz(n, shape, scale = 1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. }
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{shape, scale}{positive shape and scale parameters. }
+
+}
+\value{
+ \code{dgompertz} gives the density,
+ \code{pgompertz} gives the cumulative distribution function,
+ \code{qgompertz} gives the quantile function, and
+ \code{rgompertz} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{gompertz}} for details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{gompertz}},
+ \code{\link{dgumbel}},
+ \code{\link{dmakeham}}.
+
+
+}
+\examples{
+probs <- seq(0.01, 0.99, by = 0.01)
+Shape <- exp(1); Scale <- exp(1);
+max(abs(pgompertz(qgompertz(p = probs, Shape, Scale),
+ Shape, Scale) - probs)) # Should be 0
+
+\dontrun{ x <- seq(-0.1, 1.0, by = 0.01);
+plot(x, dgompertz(x, Shape, Scale), type = "l", col = "blue", las = 1,
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles",
+ ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pgompertz(x, Shape, Scale), col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qgompertz(probs, Shape, Scale)
+lines(Q, dgompertz(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
+pgompertz(Q, Shape, Scale) - probs # Should be all zero
+abline(h = probs, col = "purple", lty = 3) }
+}
+\keyword{distribution}
+
+
diff --git a/man/gpd.Rd b/man/gpd.Rd
index 1d9c300..d60a2eb 100644
--- a/man/gpd.Rd
+++ b/man/gpd.Rd
@@ -8,11 +8,9 @@
}
\usage{
-gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
- eshape = if (lshape == "logoff") list(offset = 0.5) else
- if (lshape == "elogit") list(min = -0.5, max = 0.5) else NULL,
+gpd(threshold = 0, lscale = "loge", lshape = logoff(offset = 0.5),
percentiles = c(90, 95), iscale = NULL, ishape = NULL,
- tolshape0 = 0.001, giveWarning = TRUE, imethod = 1, zero = 2)
+ tolshape0 = 0.001, giveWarning = TRUE, imethod = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -34,12 +32,9 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
scoring does not work.
See the Details section below for more information.
- }
- \item{escale, eshape}{
- Extra argument for the \code{lscale} and \code{lshape} arguments.
- See \code{earg} in \code{\link{Links}} for general information.
+
For the shape parameter,
- if the \code{\link{logoff}} link is chosen then the offset is
+ the default \code{\link{logoff}} link has an offset
called \eqn{A} below; and then the second linear/additive predictor is
\eqn{\log(\xi+A)}{log(xi+A)} which means that
\eqn{\xi > -A}{xi > -A}.
@@ -104,15 +99,19 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
\item{zero}{
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
- The value must be from the set \{1,2\} corresponding
+ For one response, the value should be from the set \{1,2\} corresponding
respectively to \eqn{\sigma}{sigma} and \eqn{\xi}{xi}.
It is often a good idea for the \eqn{\sigma}{sigma} parameter only
to be modelled through
a linear combination of the explanatory variables because the
shape parameter is probably best left as an intercept only:
\code{zero = 2}.
- Setting \code{zero=NULL} means both parameters are modelled with
+ Setting \code{zero = NULL} means both parameters are modelled with
explanatory variables.
+ See \code{\link{CommonVGAMffArguments}} for more details.
+
+
+
}
}
@@ -163,6 +162,9 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
The response in the formula of \code{\link{vglm}}
and \code{\link{vgam}} is \eqn{y}.
Internally, \eqn{y-\mu}{y-mu} is computed.
+ This \pkg{VGAM} family function can handle multiple
+ responses, which are inputted as a matrix.
+
With functions \code{\link{rgpd}}, \code{\link{dgpd}}, etc., the
@@ -223,23 +225,23 @@ gpd(threshold = 0, lscale = "loge", lshape = "logoff", escale = list(),
\examples{
# Simulated data from an exponential distribution (xi = 0)
-threshold = 0.5
-gdata = data.frame(y1 = threshold + rexp(n = 3000, rate = 2))
-fit = vglm(y1 ~ 1, gpd(threshold = threshold), gdata, trace = TRUE)
+threshold <- 0.5
+gdata <- data.frame(y1 = threshold + rexp(n = 3000, rate = 2))
+fit <- vglm(y1 ~ 1, gpd(threshold = threshold), gdata, trace = TRUE)
head(fitted(fit))
coef(fit, matrix = TRUE) # xi should be close to 0
Coef(fit)
summary(fit)
-fit at extra$threshold # Note the threshold is stored here
+fit at extra$threshold # Note the threshold is stored here
# Check the 90 percentile
-ii = depvar(fit) < fitted(fit)[1, "90\%"]
-100 * table(ii) / sum(table(ii)) # Should be 90%
+ii <- depvar(fit) < fitted(fit)[1, "90\%"]
+100 * table(ii) / sum(table(ii)) # Should be 90%
# Check the 95 percentile
-ii = depvar(fit) < fitted(fit)[1, "95\%"]
-100 * table(ii) / sum(table(ii)) # Should be 95%
+ii <- depvar(fit) < fitted(fit)[1, "95\%"]
+100 * table(ii) / sum(table(ii)) # Should be 95%
\dontrun{ plot(depvar(fit), col = "blue", las = 1,
main = "Fitted 90\% and 95\% quantiles")
@@ -247,21 +249,21 @@ matlines(1:length(depvar(fit)), fitted(fit), lty = 2:3, lwd = 2) }
# Another example
-gdata = data.frame(x2 = runif(nn <- 2000))
-threshold = 0; xi = exp(-0.8) - 0.5
-gdata = transform(gdata, y2 = rgpd(nn, scale = exp(1+0.1*x2), shape = xi))
-fit = vglm(y2 ~ x2, gpd(threshold), gdata, trace = TRUE)
+gdata <- data.frame(x2 = runif(nn <- 2000))
+threshold <- 0; xi <- exp(-0.8) - 0.5
+gdata <- transform(gdata, y2 = rgpd(nn, scale = exp(1 + 0.1*x2), shape = xi))
+fit <- vglm(y2 ~ x2, gpd(threshold), gdata, trace = TRUE)
coef(fit, matrix = TRUE)
\dontrun{ # Nonparametric fits
-gdata = transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
+gdata <- transform(gdata, yy = y2 + rnorm(nn, sd = 0.1))
# Not so recommended:
-fit1 = vgam(yy ~ s(x2), gpd(threshold), gdata, trace = TRUE)
+fit1 <- vgam(yy ~ s(x2), gpd(threshold), gdata, trace = TRUE)
par(mfrow = c(2,1))
plotvgam(fit1, se = TRUE, scol = "blue")
# More recommended:
-fit2 = vglm(yy ~ bs(x2), gpd(threshold), gdata, trace = TRUE)
+fit2 <- vglm(yy ~ bs(x2), gpd(threshold), gdata, trace = TRUE)
plotvgam(fit2, se = TRUE, scol = "blue") }
}
\keyword{models}
diff --git a/man/gpdUC.Rd b/man/gpdUC.Rd
index eb6f6a1..8815781 100644
--- a/man/gpdUC.Rd
+++ b/man/gpdUC.Rd
@@ -102,8 +102,8 @@ London: Springer-Verlag.
}
\examples{
-\dontrun{ x = seq(-0.2, 3, by = 0.01)
-loc = 0; sigma = 1; xi = -0.4
+\dontrun{ x <- seq(-0.2, 3, by = 0.01)
+loc <- 0; sigma <- 1; xi <- -0.4
plot(x, dgpd(x, loc, sigma, xi), type = "l", col = "blue", ylim = c(0, 1),
main = "Blue is density, red is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
diff --git a/man/grc.Rd b/man/grc.Rd
index 04ac925..0f91dda 100644
--- a/man/grc.Rd
+++ b/man/grc.Rd
@@ -1,17 +1,17 @@
\name{grc}
\alias{grc}
-\alias{rcam}
+\alias{rcim}
%- Also NEED an `\alias' for EACH other topic documented here.
-\title{ Row-Column Association Models including Goodman's RC Association Model }
+\title{ Row-Column Interaction Models including Goodman's RC Association Model }
\description{
Fits a Goodman's RC association model to a matrix of counts,
- and more generally, a sub-class of row-column association models.
+ and more generally, a sub-class of row-column interaction models.
}
\usage{
grc(y, Rank = 1, Index.corner = 2:(1 + Rank),
szero = 1, summary.arg = FALSE, h.step = 1e-04, ...)
-rcam(y, family = poissonff, Rank = 0, Musual = NULL,
+rcim(y, family = poissonff, Rank = 0, Musual = NULL,
weights = NULL, which.lp = 1,
Index.corner = if (!Rank) NULL else 1 + Musual * (1:Rank),
rprefix = "Row.", cprefix = "Col.", offset = 0,
@@ -26,7 +26,7 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
\arguments{
\item{y}{
For \code{grc} a matrix of counts.
- For \code{rcam} a general matrix response depending on \code{family}.
+ For \code{rcim} a general matrix response depending on \code{family}.
Output from \code{table()} is acceptable; it is converted into a matrix.
Note that \code{y} should be at least 3 by 3 in dimension.
@@ -115,7 +115,7 @@ rcam(y, family = poissonff, Rank = 0, Musual = NULL,
\item{Musual}{
The number of linear predictors of the \pkg{VGAM} \code{family} function
for an ordinary (univariate) response.
- Then the number of linear predictors of the \code{rcam()} fit is
+ Then the number of linear predictors of the \code{rcim()} fit is
usually the number of columns of \code{y} multiplied by \code{Musual}.
The default is to evaluate the \code{infos} slot of the
\pkg{VGAM} \code{family} function to try to evaluate it;
@@ -151,12 +151,12 @@ These are called \code{Row.} and \code{Col.} (by default) followed
by the row or column number.
-The function \code{rcam()} is more general than \code{grc()}.
+The function \code{rcim()} is more general than \code{grc()}.
Its default is a no-interaction model of \code{grc()}, i.e.,
rank-0 and a Poisson distribution. This means that each
row and column has a dummy variable associated with it.
The first row and column is baseline.
-The power of \code{rcam()} is that many \pkg{VGAM} family functions
+The power of \code{rcim()} is that many \pkg{VGAM} family functions
can be assigned to its \code{family} argument.
For example,
\code{\link{normal1}} fits something in between a 2-way
@@ -176,12 +176,12 @@ result may not have meaning.
An object of class \code{"grc"}, which currently is the same as
an \code{"rrvglm"} object.
Currently,
- a rank-0 \code{rcam()} object is of class \code{\link{rcam0-class}},
- else of class \code{"rcam"} (this may change in the future).
+ a rank-0 \code{rcim()} object is of class \code{\link{rcim0-class}},
+ else of class \code{"rcim"} (this may change in the future).
% Currently,
-% a rank-0 \code{rcam()} object is of class \code{\link{vglm-class}},
-% but it may become of class \code{"rcam"} one day.
+% a rank-0 \code{rcim()} object is of class \code{\link{vglm-class}},
+% but it may become of class \code{"rcim"} one day.
}
@@ -193,7 +193,7 @@ Reduced-rank vector generalized linear models.
Yee, T. W. and Hadi, A. F. (2012)
-Row-column association models
+Row-column interaction models
\emph{In preparation}.
@@ -243,7 +243,7 @@ assistance from Alfian F. Hadi.
}
\section{Warning}{
- The function \code{rcam()} is experimental at this stage and
+ The function \code{rcim()} is experimental at this stage and
may have bugs.
Quite a lot of expertise is needed when fitting and in its
interpretion thereof. For example, the constraint
@@ -261,7 +261,7 @@ assistance from Alfian F. Hadi.
The functions temporarily create a permanent data frame
- called \code{.grc.df} or \code{.rcam.df}, which used
+ called \code{.grc.df} or \code{.rcim.df}, which used
to be needed by \code{summary.rrvglm()}. Then these
data frames are deleted before exiting the function.
If an error occurs, then the data frames may be present
@@ -277,9 +277,9 @@ assistance from Alfian F. Hadi.
\code{\link{rrvglm-class}},
\code{summary.grc},
\code{\link{moffset}},
- \code{\link{Rcam}},
+ \code{\link{Rcim}},
\code{\link{Qvar}},
- \code{\link{plotrcam0}},
+ \code{\link{plotrcim0}},
\code{\link{alcoff}},
\code{\link{crashi}},
\code{\link{auuc}},
@@ -309,19 +309,19 @@ Coef(oly1)
# Roughly median polish
-rcam0 <- rcam(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
-round(fitted(rcam0), dig = 0)
-round(100 * (fitted(rcam0) - auuc) / auuc, dig = 0) # Discrepancy
-rcam0 at y
-round(coef(rcam0, matrix = TRUE), dig = 2)
-print(Coef(rcam0, matrix = TRUE), dig = 3)
-# constraints(rcam0)
-names(constraints(rcam0))
+rcim0 <- rcim(auuc, fam = alaplace2(tau = 0.5, intparloc = TRUE), trace = TRUE)
+round(fitted(rcim0), dig = 0)
+round(100 * (fitted(rcim0) - auuc) / auuc, dig = 0) # Discrepancy
+rcim0 at y
+round(coef(rcim0, matrix = TRUE), dig = 2)
+print(Coef(rcim0, matrix = TRUE), dig = 3)
+# constraints(rcim0)
+names(constraints(rcim0))
# Compare with medpolish():
(med.a <- medpolish(auuc))
fv <- med.a$overall + outer(med.a$row, med.a$col, "+")
-round(100 * (fitted(rcam0) - fv) / fv) # Hopefully should be all 0s
+round(100 * (fitted(rcim0) - fv) / fv) # Hopefully should be all 0s
}
\keyword{models}
\keyword{regression}
diff --git a/man/gumbel.Rd b/man/gumbel.Rd
index c0052c7..0a7645c 100644
--- a/man/gumbel.Rd
+++ b/man/gumbel.Rd
@@ -9,11 +9,11 @@
}
\usage{
-gumbel(llocation = "identity", lscale = "loge", elocation = list(),
- escale = list(), iscale = NULL, R = NA, percentiles = c(95, 99),
+gumbel(llocation = "identity", lscale = "loge",
+ iscale = NULL, R = NA, percentiles = c(95, 99),
mpv = FALSE, zero = NULL)
-egumbel(llocation = "identity", lscale = "loge", elocation = list(),
- escale = list(), iscale = NULL, R = NA, percentiles = c(95, 99),
+egumbel(llocation = "identity", lscale = "loge",
+ iscale = NULL, R = NA, percentiles = c(95, 99),
mpv = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -22,10 +22,6 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
Parameter link functions for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
See \code{\link{Links}} for more choices.
- }
- \item{elocation, escale}{
- Extra argument for the \code{llocation} and \code{lscale} arguments.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{iscale}{
@@ -35,6 +31,7 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
In general, a larger value is better than a smaller value.
A \code{NULL} means an initial value is computed internally.
+
}
\item{R}{
@@ -71,7 +68,7 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
\item{zero}{
An integer-valued vector specifying which linear/additive predictors
are modelled as intercepts only. The value (possibly values) must
- be from the set \{1,2\} corresponding respectively to \eqn{\mu}{mu}
+ be from the set \{1, 2\} corresponding respectively to \eqn{\mu}{mu}
and \eqn{\sigma}{sigma}. By default all linear/additive predictors
are modelled as a linear combination of the explanatory variables.
@@ -169,7 +166,7 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
\code{na.action = na.pass}. The response matrix needs to be
padded with any missing values. With a multivariate response
one has a matrix \code{y}, say, where
- \code{y[,2]} contains the second order statistics etc.
+ \code{y[, 2]} contains the second order statistics etc.
% If a random variable \eqn{Y} has a \emph{reverse}
% \eqn{Gumbel(\mu,\sigma)}{Gumbel(mu,sigma)} distribution then \eqn{-Y}
@@ -181,11 +178,12 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
\seealso{
\code{\link{rgumbel}},
+ \code{\link{dgumbelII}},
\code{\link{cgumbel}},
\code{\link{guplot}},
\code{\link{gev}},
\code{\link{egev}},
-%\code{\link{ogev}},
+% \code{\link{ogev}},
\code{\link{venice}}.
@@ -193,42 +191,42 @@ egumbel(llocation = "identity", lscale = "loge", elocation = list(),
\examples{
# Example 1: Simulated data
-gdata = data.frame(y = rgumbel(n = 1000, loc = 100, scale = exp(1)))
-fit = vglm(y ~ 1, egumbel(perc = NULL), gdata, trace = TRUE)
+gdata <- data.frame(y = rgumbel(n = 1000, loc = 100, scale = exp(1)))
+fit <- vglm(y ~ 1, egumbel(perc = NULL), gdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
with(gdata, mean(y))
# Example 2: Venice data
-(fit = vglm(cbind(r1,r2,r3,r4,r5) ~ year, data = venice,
- gumbel(R = 365, mpv = TRUE), trace = TRUE))
+(fit <- vglm(cbind(r1,r2,r3,r4,r5) ~ year, data = venice,
+ gumbel(R = 365, mpv = TRUE), trace = TRUE))
head(fitted(fit))
-coef(fit, mat = TRUE)
+coef(fit, matrix = TRUE)
vcov(summary(fit))
sqrt(diag(vcov(summary(fit)))) # Standard errors
# Example 3: Try a nonparametric fit ---------------------
# Use the entire data set, including missing values
-y = as.matrix(venice[,paste("r",1:10,sep = "")])
-fit1 = vgam(y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
- data = venice, trace = TRUE, na.action = na.pass)
-fit1 at y[4:5,] # NAs used to pad the matrix
+y <- as.matrix(venice[, paste("r", 1:10, sep = "")])
+fit1 <- vgam(y ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
+ data = venice, trace = TRUE, na.action = na.pass)
+depvar(fit1)[4:5, ] # NAs used to pad the matrix
\dontrun{
# Plot the component functions
-par(mfrow = c(2,1), mar = c(5,4,.2,1)+0.1, xpd = TRUE)
+par(mfrow = c(2, 1), mar = c(5, 4, 0.2, 1) + 0.1, xpd = TRUE)
plot(fit1, se = TRUE, lcol = "blue", scol = "green", lty = 1,
lwd = 2, slwd = 2, slty = "dashed")
# Quantile plot --- plots all the fitted values
-par(mfrow = c(1,1), bty = "l", mar = c(4,4,.2,3)+0.1, xpd = TRUE, las = 1)
-qtplot(fit1, mpv = TRUE, lcol = c(1,2,5), tcol = c(1,2,5), lwd = 2,
+par(mfrow = c(1, 1), bty = "l", mar = c(4, 4, 0.2, 3) + 0.1, xpd = TRUE, las = 1)
+qtplot(fit1, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5), lwd = 2,
pcol = "blue", tadj = 0.1, ylab = "Sea level (cm)")
# Plot the 99 percentile only
-par(mfrow = c(1,1), mar = c(3,4,.2,1)+0.1, xpd = TRUE)
+par(mfrow = c(1, 1), mar = c(3, 4, 0.2, 1) + 0.1, xpd = TRUE)
year = venice[["year"]]
matplot(year, y, ylab = "Sea level (cm)", type = "n")
matpoints(year, y, pch = "*", col = "blue")
@@ -237,12 +235,12 @@ lines(year, fitted(fit1)[,"99\%"], lwd = 2, col = "red")
# Check the 99 percentiles with a smoothing spline.
# Nb. (1-0.99) * 365 = 3.65 is approx. 4, meaning the 4th order
# statistic is approximately the 99 percentile.
-par(mfrow = c(1,1), mar = c(3,4,2,1)+0.1, xpd = TRUE, lwd = 2)
-plot(year, y[,4], ylab = "Sea level (cm)", type = "n",
+par(mfrow = c(1, 1), mar = c(3, 4, 2, 1) + 0.1, xpd = TRUE, lwd = 2)
+plot(year, y[, 4], ylab = "Sea level (cm)", type = "n",
main = "Red is 99 percentile, Green is a smoothing spline")
-points(year, y[,4], pch = "4", col = "blue")
+points(year, y[, 4], pch = "4", col = "blue")
lines(year, fitted(fit1)[,"99\%"], lty = 1, col = "red")
-lines(smooth.spline(year, y[,4], df = 4), col = "darkgreen", lty = 2)
+lines(smooth.spline(year, y[, 4], df = 4), col = "darkgreen", lty = 2)
}
}
\keyword{models}
diff --git a/man/gumbelII.Rd b/man/gumbelII.Rd
new file mode 100644
index 0000000..6e08164
--- /dev/null
+++ b/man/gumbelII.Rd
@@ -0,0 +1,149 @@
+\name{gumbelII}
+\alias{gumbelII}
+%\alias{gumbelIIff}
+%\alias{gumbelII.lsh}
+%\alias{gumbelII3}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Gumbel-II Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter Gumbel-II distribution.
+
+}
+\usage{
+gumbelII(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ probs.y = c(0.2, 0.5, 0.8),
+ perc.out = NULL, imethod = 1, zero = -2)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link functions applied to the
+ (positive) shape parameter (called \eqn{a} below) and
+ (positive) scale parameter (called \eqn{b} below).
+ See \code{\link{Links}} for more choices.
+
+
+ }
+
+% \item{eshape, escale}{
+% eshape = list(), escale = list(),
+% Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+% Parameter link functions applied to the
+ \item{ishape, iscale}{
+ Optional initial values for the shape and scale parameters.
+
+
+ }
+ \item{imethod}{
+ See \code{\link{weibull}}.
+
+
+ }
+ \item{zero, probs.y}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+ }
+ \item{perc.out}{
+ If the fitted values are to be quantiles then set this
+ argument to be the percentiles of these, e.g., 50 for median.
+
+ }
+
+}
+\details{
+ The Gumbel-II density for a response \eqn{Y} is
+  \deqn{f(y;a,b) = a (y/b)^{-a-1} \exp[-(y/b)^{-a}] / b}{%
+        f(y;a,b) = a * (y/b)^(-a-1) * exp(-(y/b)^(-a)) / b}
+ for \eqn{a > 0}, \eqn{b > 0}, \eqn{y > 0}.
+ The cumulative distribution function is
+ \deqn{F(y;a,b) = \exp[-(y/b)^{-a}].}{%
+ F(y;a,b) = exp(-(y/b)^(-a)).}
+ The mean of \eqn{Y} is \eqn{b \, \Gamma(1 - 1/a)}{b * gamma(1 - 1/a)}
+ (returned as the fitted values)
+ when \eqn{a>1},
+  and the variance is \eqn{b^2\,[\Gamma(1-2/a) - \Gamma(1-1/a)^2]}{b^2 *
+  [Gamma(1-2/a) - Gamma(1-1/a)^2]} when \eqn{a>2}.
+ This distribution looks similar to \code{\link{weibull}}, and is
+ due to Gumbel (1954).
+
+
+ This \pkg{VGAM} family function currently does not handle censored data.
+ Fisher scoring is used to estimate the two parameters.
+ Probably similar regularity conditions hold for this distribution
+ compared to the Weibull distribution.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+
+Gumbel, E. J. (1954).
+Statistical theory of extreme values and some practical applications.
+\emph{Applied Mathematics Series}, volume 33,
+U.S. Department of Commerce, National Bureau of Standards, USA.
+
+
+
+}
+\author{ T. W. Yee }
+\note{
+ See \code{\link{weibull}}.
+ This \pkg{VGAM} family function handles multiple responses.
+
+
+
+}
+%\section{Warning}{
+% This function is under development to handle other censoring situations.
+% The version of this function which will handle censored data will be
+% called \code{cengumbelII()}. It is currently being written and will use
+% \code{\link{SurvS4}} as input.
+% It should be released in later versions of \pkg{VGAM}.
+%
+%
+% If the shape parameter is less than two then misleading inference may
+% result, e.g., in the \code{summary} and \code{vcov} of the object.
+%
+%
+%}
+
+\seealso{
+ \code{\link{dgumbelII}},
+ \code{\link{gumbel}},
+ \code{\link{gev}}.
+
+
+}
+\examples{
+gdata <- data.frame(x2 = runif(nn <- 1000))
+gdata <- transform(gdata, eta1 = -1,
+ eta2 = -1 + 0.1 * x2,
+ ceta1 = 0,
+ ceta2 = 1)
+gdata <- transform(gdata, shape1 = exp(eta1),
+ shape2 = exp(eta2),
+ scale1 = exp(ceta1),
+ scale2 = exp(ceta2))
+gdata <- transform(gdata,
+ y1 = rgumbelII(nn, shape = shape1, scale = scale1),
+ y2 = rgumbelII(nn, shape = shape2, scale = scale2))
+
+fit <- vglm(cbind(y1, y2) ~ x2,
+ gumbelII(zero = c(1, 2, 4)), gdata, trace = TRUE)
+coef(fit, matrix = TRUE)
+vcov(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
diff --git a/man/gumbelIIUC.Rd b/man/gumbelIIUC.Rd
new file mode 100644
index 0000000..a83cb4b
--- /dev/null
+++ b/man/gumbelIIUC.Rd
@@ -0,0 +1,77 @@
+\name{Gumbel-II}
+\alias{Gumbel-II}
+\alias{dgumbelII}
+\alias{pgumbelII}
+\alias{qgumbelII}
+\alias{rgumbelII}
+\title{The Gumbel-II Distribution}
+\description{
+ Density, cumulative distribution function,
+ quantile function
+ and
+ random generation for
+ the Gumbel-II distribution.
+
+}
+\usage{
+dgumbelII(x, shape, scale = 1, log = FALSE)
+pgumbelII(q, shape, scale = 1)
+qgumbelII(p, shape, scale = 1)
+rgumbelII(n, shape, scale = 1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. }
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{shape, scale}{positive shape and scale parameters. }
+
+}
+\value{
+ \code{dgumbelII} gives the density,
+ \code{pgumbelII} gives the cumulative distribution function,
+ \code{qgumbelII} gives the quantile function, and
+ \code{rgumbelII} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{gumbelII}} for details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{gumbelII}},
+ \code{\link{dgumbel}}.
+
+
+}
+\examples{
+probs <- seq(0.01, 0.99, by = 0.01)
+Shape <- exp(0.5); Scale <- exp(1);
+max(abs(pgumbelII(qgumbelII(p = probs, Shape, Scale),
+ Shape, Scale) - probs)) # Should be 0
+
+\dontrun{ x <- seq(-0.1, 10, by = 0.01);
+plot(x, dgumbelII(x, Shape, Scale), type = "l", col = "blue", las = 1,
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles",
+ ylab = "", ylim = 0:1)
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pgumbelII(x, Shape, Scale), col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qgumbelII(probs, Shape, Scale)
+lines(Q, dgumbelII(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
+pgumbelII(Q, Shape, Scale) - probs # Should be all zero
+abline(h = probs, col = "purple", lty = 3) }
+}
+\keyword{distribution}
+
+
diff --git a/man/gumbelIbiv.Rd b/man/gumbelIbiv.Rd
index ea037c2..2a402e8 100644
--- a/man/gumbelIbiv.Rd
+++ b/man/gumbelIbiv.Rd
@@ -8,7 +8,7 @@
}
\usage{
-gumbelIbiv(lapar="identity", earg=list(), iapar=NULL, imethod=1)
+gumbelIbiv(lapar = "identity", iapar = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,11 +18,6 @@ gumbelIbiv(lapar="identity", earg=list(), iapar=NULL, imethod=1)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iapar}{
Numeric. Optional initial value for \eqn{\alpha}{alpha}.
By default, an initial value is chosen internally.
@@ -49,15 +44,19 @@ gumbelIbiv(lapar="identity", earg=list(), iapar=NULL, imethod=1)
The marginal distributions are an exponential distribution with
unit mean.
+
A variant of Newton-Raphson is used, which only seems to work for an
intercept model.
It is a very good idea to set \code{trace=TRUE}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
@@ -66,6 +65,7 @@ Castillo, E., Hadi, A. S., Balakrishnan, N. Sarabia, J. S. (2005)
\emph{Extreme Value and Related Models with Applications in Engineering and Science},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\note{
@@ -74,19 +74,22 @@ Hoboken, NJ, USA: Wiley-Interscience.
This is because each marginal distribution corresponds to a
exponential distribution with unit mean.
+
This \pkg{VGAM} family function should be used with caution.
+
}
\seealso{
\code{\link{morgenstern}}.
+
}
\examples{
-nn = 1000
-gdata = data.frame(y1 = rexp(nn), y2 = rexp(nn))
-\dontrun{ with(gdata, plot(cbind(y1,y2))) }
-fit = vglm(cbind(y1, y2) ~ 1, fam = gumbelIbiv, gdata, trace = TRUE)
+nn <- 1000
+gdata <- data.frame(y1 = rexp(nn), y2 = rexp(nn))
+\dontrun{ with(gdata, plot(cbind(y1, y2))) }
+fit <- vglm(cbind(y1, y2) ~ 1, fam = gumbelIbiv, gdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/gumbelUC.Rd b/man/gumbelUC.Rd
index 4aa028f..7c1ced6 100644
--- a/man/gumbelUC.Rd
+++ b/man/gumbelUC.Rd
@@ -10,6 +10,7 @@
generation for the Gumbel distribution with
location parameter \code{location} and
scale parameter \code{scale}.
+
}
\usage{
dgumbel(x, location = 0, scale = 1, log = FALSE)
@@ -57,22 +58,29 @@ rgumbel(n, location = 0, scale = 1)
where \eqn{\gamma}{gamma} is Euler's constant (which can be
obtained as \code{-digamma(1)}).
+
See \code{\link{gumbel}}, the \pkg{VGAM} family function
for estimating the two parameters by maximum likelihood estimation,
for formulae and other details.
Apart from \code{n}, all the above arguments may be vectors and
are recyled to the appropriate length if necessary.
+
+
}
\value{
\code{dgumbel} gives the density,
\code{pgumbel} gives the distribution function,
\code{qgumbel} gives the quantile function, and
\code{rgumbel} generates random deviates.
+
+
}
\references{
Coles, S. (2001)
\emph{An Introduction to Statistical Modeling of Extreme Values}.
London: Springer-Verlag.
+
+
}
\author{ T. W. Yee }
\note{
@@ -80,26 +88,26 @@ rgumbel(n, location = 0, scale = 1)
can estimate the parameters of a Gumbel distribution using
maximum likelihood estimation.
+
}
\seealso{
\code{\link{gumbel}},
\code{\link{egumbel}},
- \code{\link{gev}}.
+ \code{\link{gev}},
+ \code{\link{dgompertz}}.
+
+
}
\examples{
-mu = 1; sigma = 2
-y = rgumbel(n = 100, loc=mu, scale=sigma)
-mean(y)
-mu - sigma * digamma(1) # population mean
-var(y)
-sigma^2 * pi^2 / 6 # population variance
-
-
-\dontrun{
-x = seq(-2.5, 3.5, by = 0.01)
-loc = 0; sigma = 1
-plot(x, dgumbel(x, loc, sigma), type = "l", col = "blue", ylim=c(0,1),
+mu <- 1; sigma <- 2;
+y <- rgumbel(n = 100, loc = mu, scale = sigma)
+c(mean(y), mu - sigma * digamma(1)) # Sample and population means
+c(var(y), sigma^2 * pi^2 / 6) # Sample and population variances
+
+\dontrun{ x <- seq(-2.5, 3.5, by = 0.01)
+loc <- 0; sigma <- 1
+plot(x, dgumbel(x, loc, sigma), type = "l", col = "blue", ylim = c(0, 1),
main = "Blue is density, red is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", ylab = "", las = 1)
abline(h = 0, col = "blue", lty = 2)
@@ -107,8 +115,7 @@ lines(qgumbel(seq(0.05, 0.95, by = 0.05), loc, sigma),
dgumbel(qgumbel(seq(0.05, 0.95, by = 0.05), loc, sigma), loc, sigma),
col = "purple", lty = 3, type = "h")
lines(x, pgumbel(x, loc, sigma), type = "l", col = "red")
-abline(h = 0, lty = 2)
-}
+abline(h = 0, lty = 2) }
}
\keyword{distribution}
diff --git a/man/guplot.Rd b/man/guplot.Rd
index c236557..b1314d8 100644
--- a/man/guplot.Rd
+++ b/man/guplot.Rd
@@ -12,8 +12,8 @@
}
\usage{
guplot(object, ...)
-guplot.default(y, main="Gumbel Plot",
- xlab="Reduced data", ylab="Observed data", type="p", ...)
+guplot.default(y, main = "Gumbel Plot",
+ xlab = "Reduced data", ylab = "Observed data", type = "p", ...)
guplot.vlm(object, ...)
}
%- maybe also 'usage' for other objects documented here.
@@ -91,7 +91,7 @@ guplot.vlm(object, ...)
}
-\examples{\dontrun{guplot(rnorm(500), las=1) -> ii
+\examples{\dontrun{guplot(rnorm(500), las = 1) -> ii
names(ii)
guplot(with(venice, r1), col = "blue") # Venice sea levels data
diff --git a/man/hormone.Rd b/man/hormone.Rd
new file mode 100644
index 0000000..5af3fcc
--- /dev/null
+++ b/man/hormone.Rd
@@ -0,0 +1,119 @@
+\name{hormone}
+\alias{hormone}
+\docType{data}
+\title{
+ Hormone Data
+
+}
+\description{
+ A data set described in Carroll and Ruppert (1988)
+ concerning hormone assay.
+
+%% ~~ A concise (1-5 lines) description of the dataset. ~~
+}
+\usage{data(hormone)}
+\format{
+ A data frame with 85 observations on the following 2 variables.
+
+ \describe{
+ \item{\code{X}}{a numeric vector, suitable as the x-axis in
+ a scatter plot.
+
+ }
+ \item{\code{Y}}{a numeric vector, suitable as the y-axis in
+ a scatter plot.
+
+ }
+ }
+}
+\details{
+%% ~~ If necessary, more details than the __description__ above ~~
+
+The data is described in
+Carroll and Ruppert (1988).
+
+
+}
+%\source{
+
+% Originally,
+
+%}
+\references{
+
+ Carroll, R. J. and Ruppert, D. (1988)
+ \emph{Transformation and Weighting in Regression}.
+ New York, USA: Chapman & Hall.
+
+
+ Yee, T. W. (2012)
+ Two-parameter reduced-rank vector generalized linear models.
+ \emph{In preparation}.
+
+}
+
+\seealso{
+ \code{\link{normal1}},
+ \code{\link{rrvglm}}.
+
+
+}
+
+
+
+\examples{
+data(hormone)
+summary(hormone)
+
+modelI <- rrvglm(Y ~ 1 + X, data = hormone, trace = TRUE,
+ normal1(zero = NULL, lsd = "identity", imethod = 2))
+
+# Alternative way to fit modelI
+modelI.other <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
+ normal1(zero = NULL, lsd = "identity"))
+
+# Inferior to modelI
+modelII <- vglm(Y ~ 1 + X, data = hormone, trace = TRUE,
+ family = normal1(zero = NULL))
+
+logLik(modelI)
+logLik(modelII) # Less than logLik(modelI)
+
+
+# Reproduce Equations (1)--(3) on p.65 of Carroll and Ruppert (1988)
+
+# Equation (1)
+hormone <- transform(hormone, rX = 1 / X)
+clist <- list("(Intercept)" = diag(2), X = diag(2), rX = rbind(0, 1))
+fit1 <- vglm(Y ~ 1 + X + rX, family = normal1(zero = NULL),
+ constraints = clist, data = hormone, trace = TRUE)
+coef(fit1, matrix = TRUE)
+summary(fit1) # Actually, the intercepts do not seem significant
+\dontrun{ plot(Y ~ X, hormone, col = "blue")
+lines(fitted(fit1) ~ X, hormone, col = "orange") }
+
+# Equation (2)
+fit2 <- rrvglm(Y ~ 1 + X, normal1(zero = NULL), hormone, trace = TRUE)
+coef(fit2, matrix = TRUE)
+\dontrun{ plot(Y ~ X, hormone, col = "blue")
+lines(fitted(fit2) ~ X, hormone, col = "red")
+# Add +- 2 SEs
+lines(fitted(fit2) + 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
+ hormone, col = "orange")
+lines(fitted(fit2) - 2 * exp(predict(fit2)[, "log(sd)"]) ~ X,
+ hormone, col = "orange") }
+
+# Equation (3)
+# Does not fit well because the loge link for the mean is not good.
+fit3 <- rrvglm(Y ~ 1 + X, maxit = 300, data = hormone, trace = TRUE,
+ normal1(lmean = "loge", zero = NULL))
+coef(fit3, matrix = TRUE)
+\dontrun{ plot(Y ~ X, hormone, col = "blue") # Does not look okay.
+lines(exp(predict(fit3)[, 1]) ~ X, hormone, col = "red")
+# Add +- 2 SEs
+lines(fitted(fit3) + 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
+ hormone, col = "orange")
+lines(fitted(fit3) - 2 * exp(predict(fit3)[, "log(sd)"]) ~ X,
+ hormone, col = "orange") }
+}
+\keyword{datasets}
diff --git a/man/hspider.Rd b/man/hspider.Rd
index 749c67f..b4f7011 100644
--- a/man/hspider.Rd
+++ b/man/hspider.Rd
@@ -70,15 +70,15 @@ hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
# Fit a rank-1 binomial CAO
-hsbin = hspider # Binary species data
-hsbin[,-(1:6)] = as.numeric(hsbin[,-(1:6)] > 0)
+hsbin <- hspider # Binary species data
+hsbin[,-(1:6)] <- as.numeric(hsbin[,-(1:6)] > 0)
set.seed(123)
-ahsb1 = cao(cbind(Alopcune,Arctlute,Auloalbi,Zoraspin) ~
- WaterCon + ReflLux, family = binomialff(mv=TRUE),
+ahsb1 <- cao(cbind(Alopcune,Arctlute,Auloalbi,Zoraspin) ~
+ WaterCon + ReflLux, family = binomialff(mv = TRUE),
df1.nl = 2.2, Bestof=3, data = hsbin)
-par(mfrow=2:1, las=1)
-lvplot(ahsb1, type="predictors", llwd=2, ylab="logit p", lcol=1:9)
-persp(ahsb1, rug=TRUE, col=1:10, lwd=2)
+par(mfrow = 2:1, las = 1)
+lvplot(ahsb1, type = "predictors", llwd = 2, ylab = "logit p", lcol = 1:9)
+persp(ahsb1, rug = TRUE, col = 1:10, lwd = 2)
coef(ahsb1)
}
}
diff --git a/man/huber.Rd b/man/huber.Rd
index 7c29848..8ae879b 100644
--- a/man/huber.Rd
+++ b/man/huber.Rd
@@ -10,10 +10,9 @@
}
\usage{
-huber1(llocation = "identity", elocation = list(),
- k = 0.862, imethod = 1)
-huber(llocation = "identity", lscale = "loge", elocation = list(),
- escale = list(), k = 0.862, imethod = 1, zero = 2)
+huber1(llocation = "identity", k = 0.862, imethod = 1)
+huber(llocation = "identity", lscale = "loge",
+ k = 0.862, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,12 +22,6 @@ huber(llocation = "identity", lscale = "loge", elocation = list(),
}
- \item{elocation, escale}{
- List. Extra argument for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
-
- }
\item{k}{
Tuning constant.
See \code{\link{rhuber}} for more information.
@@ -100,12 +93,12 @@ huber(llocation = "identity", lscale = "loge", elocation = list(),
}
\examples{
-set.seed(1231); NN = 30; coef1 = 1; coef2 = 10
-hdata = data.frame(x2 = sort(runif(NN)))
-hdata = transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2))
+set.seed(1231); NN <- 30; coef1 <- 1; coef2 <- 10
+hdata <- data.frame(x2 = sort(runif(NN)))
+hdata <- transform(hdata, y = rhuber(NN, mu = coef1 + coef2 * x2))
-hdata$x2[1] = 0.0 # Add an outlier
-hdata$y[1] = 10
+hdata$x2[1] <- 0.0 # Add an outlier
+hdata$y[1] <- 10
fit.huber <- vglm(y ~ x2, huber (imethod = 3), hdata, trace = TRUE)
fit.huber1 <- vglm(y ~ x2, huber1(imethod = 3), hdata, trace = TRUE)
diff --git a/man/huberUC.Rd b/man/huberUC.Rd
index e1ee7af..bfb94be 100644
--- a/man/huberUC.Rd
+++ b/man/huberUC.Rd
@@ -92,19 +92,19 @@ edhuber(1:5, k = 1.5)
rhuber(5)
# Plot cdf and pdf
-\dontrun{ mu = 3; xx = seq(-2, 7, len = 100)
+\dontrun{ mu <- 3; xx <- seq(-2, 7, len = 100)
plot(xx, dhuber(xx, mu = mu), type = "l", col = "blue", las = 1, ylab = "",
main = "blue is density, red is cumulative distribution function",
sub = "Purple lines are the 10,20,...,90 percentiles",
ylim = 0:1)
abline(h = 0, col = "blue", lty = 2)
lines(xx, phuber(xx, mu = mu), type = "l", col = "red")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qhuber(probs, mu = mu)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qhuber(probs, mu = mu)
lines(Q, dhuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
lines(Q, phuber(Q, mu = mu), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
-phuber(Q, mu = mu) - probs # Should be all zero
+phuber(Q, mu = mu) - probs # Should be all 0s
}
}
\keyword{distribution}
diff --git a/man/huggins91.Rd b/man/huggins91.Rd
index 97aa7c9..ce5fab8 100644
--- a/man/huggins91.Rd
+++ b/man/huggins91.Rd
@@ -10,12 +10,12 @@
}
\usage{
-huggins91(link = "logit", earg = list(), parallel = TRUE,
+huggins91(link = "logit", parallel = TRUE,
iprob = NULL, eim.not.oim = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg, parallel, iprob}{
+ \item{link, parallel, iprob}{
See \code{\link{CommonVGAMffArguments}} for information.
The \code{parallel} argument should generally be left alone since
parallelism is assumed by Huggins (1991).
@@ -130,16 +130,17 @@ approach to capture experiments.
\code{\link{rhuggins91}}.
\code{\link{posbinomial}}.
+
}
\examples{
-set.seed(123); nTimePts = 5
-hdata = rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2)
+set.seed(123); nTimePts <- 5
+hdata <- rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2)
# The truth: xcoeffs are c(-2, 1, 2) and capeffect = -1
# Model 1 is where capture history information is used
-model1 = vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
+model1 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
huggins91, data = hdata, trace = TRUE,
xij = list(Chistory ~ ch0 + zch0 +
ch1 + zch1 + ch2 + zch2 +
@@ -148,25 +149,25 @@ model1 = vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
ch0 + ch1 + ch2 + ch3 + ch4 +
zch0 + zch1 + zch2 + zch3 + zch4)
-coef(model1, matrix = TRUE) # Biased!!
+coef(model1, matrix = TRUE) # Biased!!
summary(model1)
head(fitted(model1))
head(model.matrix(model1, type = "vlm"), 21)
head(hdata)
# Model 2 is where no capture history information is used
-model2 = vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
+model2 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2,
huggins91, data = hdata, trace = TRUE)
-coef(model2, matrix = TRUE) # Biased!!
+coef(model2, matrix = TRUE) # Biased!!
summary(model2)
# Model 3 is where half the capture history is used in both
# the numerator and denominator
set.seed(123); nTimePts = 5
-hdata2 = rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2,
+hdata2 <- rhuggins91(n = 1000, nTimePts = nTimePts, pvars = 2,
double.ch = TRUE)
head(hdata2) # 2s have replaced the 1s in hdata
-model3 = vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
+model3 <- vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
huggins91, data = hdata2, trace = TRUE,
xij = list(Chistory ~ ch0 + zch0 +
ch1 + zch1 + ch2 + zch2 +
@@ -174,7 +175,7 @@ model3 = vglm(cbind(y1, y2, y3, y4, y5) ~ x2 + Chistory,
form2 = ~ 1 + x2 + Chistory +
ch0 + ch1 + ch2 + ch3 + ch4 +
zch0 + zch1 + zch2 + zch3 + zch4)
-coef(model3, matrix = TRUE) # Biased!!
+coef(model3, matrix = TRUE) # Biased!!
}
\keyword{models}
\keyword{regression}
diff --git a/man/huggins91UC.Rd b/man/huggins91UC.Rd
index 2281d9b..462a179 100644
--- a/man/huggins91UC.Rd
+++ b/man/huggins91UC.Rd
@@ -12,7 +12,7 @@
\usage{
rhuggins91(n, nTimePts = 5, pvars = length(xcoeff), xcoeff = c(-2, 1, 2),
capeffect = -1, double.ch = FALSE,
- link = "logit", earg = list())
+ link = "logit", earg.link = FALSE)
dhuggins91(x, prob, prob0 = prob, log = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,16 +20,19 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
\item{x}{
response vector or matrix.
Should have values of 0s or 1s.
+
}
\item{nTimePts}{Number of sampling occasions.
Called \eqn{T} in \code{\link{huggins91}}.
+
}
\item{n}{number of observations.
Usually a single positive integer, else the length of the vector
is used.
+
}
\item{capeffect}{
@@ -50,6 +53,7 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
(this is a compromise of the Huggins (1991) model where the full
capture history only appears in the numerator).
+
}
\item{pvars}{ Number of other numeric covariates that make up
@@ -59,6 +63,7 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
independent standard \code{\link[stats:Uniform]{runif}} random variates.
The first \code{pvars} elements of \code{xcoeff} are used.
+
}
\item{xcoeff}{
@@ -67,10 +72,14 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
and the first is for the intercept.
The length of \code{xcoeff} must be at least \code{pvars}.
+
}
- \item{link, earg}{
- Used to generate the probabilities for capture at each occasion.
+ \item{link, earg.link}{
+ The former is used to generate the probabilities for capture
+ at each occasion.
+ Other details at \code{\link{CommonVGAMffArguments}}.
+
}
\item{prob, prob0}{
@@ -81,9 +90,12 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
be free of any capture history, i.e., as if it had
never been caught before.
+
}
\item{log}{
Logical. Return the logarithm of the answer?
+
+
}
}
@@ -113,11 +125,13 @@ dhuggins91(x, prob, prob0 = prob, log = FALSE)
These functions are experimental and do not follow the
usual conventions of \code{d}- and \code{r}-type R functions.
+
}
\seealso{
\code{\link{huggins91}}.
+
}
\examples{
set.seed(123); rhuggins91(n = 10)
diff --git a/man/hyperg.Rd b/man/hyperg.Rd
index 0a9825e..79a6b47 100644
--- a/man/hyperg.Rd
+++ b/man/hyperg.Rd
@@ -10,7 +10,7 @@
}
\usage{
-hyperg(N = NULL, D = NULL, lprob = "logit", earg = list(), iprob = NULL)
+hyperg(N = NULL, D = NULL, lprob = "logit", iprob = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -32,11 +32,6 @@ hyperg(N = NULL, D = NULL, lprob = "logit", earg = list(), iprob = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iprob}{
Optional initial value for the probabilities.
The default is to choose initial values internally.
@@ -115,20 +110,20 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-nn = 100
-m = 5 # number of white balls in the population
-k = rep(4, len = nn) # sample sizes
-n = 4 # number of black balls in the population
-y = rhyper(nn = nn, m = m, n = n, k = k)
-yprop = y / k # sample proportions
+nn <- 100
+m <- 5 # number of white balls in the population
+k <- rep(4, len = nn) # sample sizes
+n <- 4 # number of black balls in the population
+y <- rhyper(nn = nn, m = m, n = n, k = k)
+yprop <- y / k # sample proportions
# N is unknown, D is known. Both models are equivalent:
-fit = vglm(cbind(y,k-y) ~ 1, hyperg(D = m), trace = TRUE, crit = "c")
-fit = vglm(yprop ~ 1, hyperg(D=m), weight = k, trace = TRUE, crit = "c")
+fit <- vglm(cbind(y,k-y) ~ 1, hyperg(D = m), trace = TRUE, crit = "c")
+fit <- vglm(yprop ~ 1, hyperg(D=m), weight = k, trace = TRUE, crit = "c")
# N is known, D is unknown. Both models are equivalent:
-fit = vglm(cbind(y,k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l")
-fit = vglm(yprop ~ 1, hyperg(N = m+n), weight = k, trace = TRUE, crit = "l")
+fit <- vglm(cbind(y,k-y) ~ 1, hyperg(N = m+n), trace = TRUE, crit = "l")
+fit <- vglm(yprop ~ 1, hyperg(N = m+n), weight = k, trace = TRUE, crit = "l")
coef(fit, matrix = TRUE)
Coef(fit) # Should be equal to the true population proportion
diff --git a/man/hypersecant.Rd b/man/hypersecant.Rd
index c9187f1..d063433 100644
--- a/man/hypersecant.Rd
+++ b/man/hypersecant.Rd
@@ -9,10 +9,8 @@
}
\usage{
-hypersecant(link.theta = "elogit", earg = if(link.theta == "elogit")
- list(min = -pi/2, max = pi/2) else list(), init.theta = NULL)
-hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
- list(min = -pi/2, max = pi/2) else list(), init.theta = NULL)
+hypersecant(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
+hypersecant.1(link.theta = elogit(min = -pi/2, max = pi/2), init.theta = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,11 +19,6 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.theta}{
Optional initial value for \eqn{\theta}{theta}.
If failure to converge occurs, try some other value.
@@ -43,6 +36,7 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
The mean of \eqn{Y} is \eqn{\tan(\theta)}{tan(theta)} (returned as
the fitted values).
+
Another parameterization is used for \code{hypersecant.1()}.
This uses
\deqn{f(y)=(\cos(\theta)/\pi) \times y^{-0.5+\theta/\pi} \times
@@ -54,8 +48,10 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
(returned as the fitted values) and the variance is
\eqn{(\pi^2 - 4 \theta^2) / (8\pi^2)}{(pi^2 - 4*theta^2) / (8*pi^2)}.
+
For both parameterizations Newton-Raphson is same as Fisher scoring.
+
}
\value{
@@ -63,6 +59,7 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Jorgensen, B. (1997)
@@ -70,6 +67,7 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
London: Chapman & Hall.
% p.101, Eqn (3.37).
+
}
\author{ T. W. Yee }
@@ -78,16 +76,18 @@ hypersecant.1(link.theta = "elogit", earg = if(link.theta == "elogit")
%}
\seealso{
\code{\link{elogit}}.
+
+
}
\examples{
-hdata = data.frame(x = rnorm(nn <- 200))
-hdata = transform(hdata, y = rnorm(nn)) # Not very good data!
-fit = vglm(y ~ x, hypersecant, hdata, trace = TRUE, crit = "coef")
+hdata <- data.frame(x2 = rnorm(nn <- 200))
+hdata <- transform(hdata, y = rnorm(nn)) # Not very good data!
+fit <- vglm(y ~ x2, hypersecant, hdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
fit at misc$earg
# Not recommended:
-fit = vglm(y ~ x, hypersecant(link = "identity"), hdata, trace = TRUE)
+fit <- vglm(y ~ x2, hypersecant(link = "identity"), hdata, trace = TRUE)
coef(fit, matrix = TRUE)
fit at misc$earg
}
diff --git a/man/hzeta.Rd b/man/hzeta.Rd
index 58a8676..2934190 100644
--- a/man/hzeta.Rd
+++ b/man/hzeta.Rd
@@ -7,7 +7,7 @@
}
\usage{
-hzeta(link = "loglog", earg=list(), ialpha = NULL, nsimEIM=100)
+hzeta(link = "loglog", ialpha = NULL, nsimEIM=100)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,11 +18,6 @@ hzeta(link = "loglog", earg=list(), ialpha = NULL, nsimEIM=100)
the mean is finite.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ialpha}{
Optional initial value for the (positive) parameter.
The default is to obtain an initial value internally. Use this argument
@@ -77,9 +72,9 @@ hzeta(link = "loglog", earg=list(), ialpha = NULL, nsimEIM=100)
}
\examples{
-alpha = exp(exp(-0.1)) # The parameter
-hdata = data.frame(y = rhzeta(n = 1000, alpha))
-fit = vglm(y ~ 1, hzeta, hdata, trace = TRUE, crit = "c")
+alpha <- exp(exp(-0.1)) # The parameter
+hdata <- data.frame(y = rhzeta(n = 1000, alpha))
+fit <- vglm(y ~ 1, hzeta, hdata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models; should be same as alpha
c(with(hdata, mean(y)), head(fitted(fit), 1))
diff --git a/man/hzetaUC.Rd b/man/hzetaUC.Rd
index a9fbb47..357cae5 100644
--- a/man/hzetaUC.Rd
+++ b/man/hzetaUC.Rd
@@ -11,7 +11,7 @@
}
\usage{
-dhzeta(x, alpha, log=FALSE)
+dhzeta(x, alpha, log = FALSE)
phzeta(q, alpha)
qhzeta(p, alpha)
rhzeta(n, alpha)
@@ -79,11 +79,11 @@ round(1000 * dhzeta(1:8, 2))
table(rhzeta(1000, 2))
\dontrun{
-alpha = 1.1; x = 1:10
-plot(x, dhzeta(x, alpha=alpha), type="h", ylim=0:1, lwd=2,
- sub=paste("alpha =", alpha), las=1, col="blue", ylab="Probability",
- main="Haight's zeta: blue=density; red=distribution function")
-lines(x+0.1, phzeta(x, alpha=alpha), col="red", lty=3, lwd=2, type="h")
+alpha <- 1.1; x <- 1:10
+plot(x, dhzeta(x, alpha = alpha), type = "h", ylim = 0:1, lwd = 2,
+ sub = paste("alpha =", alpha), las = 1, col = "blue", ylab = "Probability",
+ main = "Haight's zeta: blue = density; red = distribution function")
+lines(x+0.1, phzeta(x, alpha = alpha), col = "red", lty = 3, lwd = 2, type = "h")
}
}
\keyword{distribution}
diff --git a/man/iam.Rd b/man/iam.Rd
index 0f0898d..ec45b2c 100644
--- a/man/iam.Rd
+++ b/man/iam.Rd
@@ -9,7 +9,7 @@
}
\usage{
-iam(j, k, M, hbw = M, both = FALSE, diag = TRUE)
+iam(j, k, M, both = FALSE, diag = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -28,10 +28,6 @@ iam(j, k, M, hbw = M, both = FALSE, diag = TRUE)
dimension of each positive-definite symmetric matrix.
}
- \item{hbw}{
- Defunct.
-
- }
\item{both}{
Logical. Return both the row and column indices?
See below for more details.
@@ -61,6 +57,7 @@ iam(j, k, M, hbw = M, both = FALSE, diag = TRUE)
This is called the \emph{matrix-band} format and is used by
the \pkg{VGAM} package.
+
}
\value{
This function has a dual purpose depending on the value of \code{both}.
@@ -113,14 +110,14 @@ iam(NULL, NULL, M = 3, both = TRUE) # Return the row and column indices
dirichlet()@weight
-M = 4
-temp1 = iam(NA, NA, M = M, both = TRUE)
-mat1 = matrix(NA, M, M)
+M <- 4
+temp1 <- iam(NA, NA, M = M, both = TRUE)
+mat1 <- matrix(NA, M, M)
mat1[cbind(temp1$row, temp1$col)] = 1:length(temp1$row)
mat1 # More commonly used
-temp2 = iam(NA, NA, M = M, both = TRUE, diag = FALSE)
-mat2 = matrix(NA, M, M)
+temp2 <- iam(NA, NA, M = M, both = TRUE, diag = FALSE)
+mat2 <- matrix(NA, M, M)
mat2[cbind(temp2$row, temp2$col)] = 1:length(temp2$row)
mat2 # Rarely used
}
diff --git a/man/identity.Rd b/man/identity.Rd
index e294679..eb3287d 100644
--- a/man/identity.Rd
+++ b/man/identity.Rd
@@ -9,41 +9,24 @@
}
\usage{
-identity(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+ identity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
+nidentity(theta, inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta}{
Numeric or character.
See below for further details.
- }
- \item{earg}{
- Extra argument for passing in additional information.
- Here, the argument is unused.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
+
}
\details{
The identity link function \eqn{g(\theta)=\theta}{g(theta)=theta}
@@ -52,8 +35,7 @@ nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
numerical problems because the estimates lie outside the permitted
range. Consequently, the result may contain
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
The function \code{nidentity} is the negative-identity link function and
corresponds to \eqn{g(\theta)=-\theta}{g(theta)=-theta}.
@@ -62,6 +44,7 @@ nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
\eqn{\xi=-k}{xi=-k} for the shape parameter and the other half use \eqn{k}
instead of \eqn{\xi}{xi}.
+
}
\value{
For \code{identity()}:
@@ -73,13 +56,17 @@ nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
For \code{nidentity()}: the results are similar to \code{identity()}
except for a sign change in most cases.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
\author{ Thomas W. Yee }
@@ -90,14 +77,15 @@ nidentity(theta, earg = list(), inverse = FALSE, deriv = 0,
\code{\link{probit}},
\code{\link{powl}}.
+
}
\examples{
identity((-5):5)
-identity((-5):5, deriv=1)
-identity((-5):5, deriv=2)
+identity((-5):5, deriv = 1)
+identity((-5):5, deriv = 2)
nidentity((-5):5)
-nidentity((-5):5, deriv=1)
-nidentity((-5):5, deriv=2)
+nidentity((-5):5, deriv = 1)
+nidentity((-5):5, deriv = 2)
}
\keyword{math}
\keyword{models}
diff --git a/man/inv.gaussianff.Rd b/man/inv.gaussianff.Rd
index 82aa5ff..2138a0f 100644
--- a/man/inv.gaussianff.Rd
+++ b/man/inv.gaussianff.Rd
@@ -9,8 +9,8 @@
}
\usage{
inv.gaussianff(lmu = "loge", llambda = "loge",
- emu = list(), elambda = list(),
- imethod = 1, ilambda = 1,
+ imethod = 1, ilambda = NULL,
+ parallel = FALSE, intercept.apply = FALSE,
shrinkage.init = 0.99, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -21,13 +21,8 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
See \code{\link{Links}} for more choices.
}
- \item{emu, elambda}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
- \item{ilambda}{
- Initial value for the \eqn{\lambda}{lambda} parameter.
+ \item{ilambda, parallel, intercept.apply}{
+ See \code{\link{CommonVGAMffArguments}} for more information.
}
\item{imethod, shrinkage.init, zero}{
@@ -52,6 +47,9 @@ inv.gaussianff(lmu = "loge", llambda = "loge",
By default, \eqn{\eta_1=\log(\mu)}{eta1=log(mu)} and
\eqn{\eta_2=\log(\lambda)}{eta2=log(lambda)}.
The mean is returned as the fitted values.
+ This \pkg{VGAM} family function can handle multiple
+ responses (inputted as a matrix).
+
}
diff --git a/man/invbinomial.Rd b/man/invbinomial.Rd
index ee27483..c574ed0 100644
--- a/man/invbinomial.Rd
+++ b/man/invbinomial.Rd
@@ -8,9 +8,8 @@
}
\usage{
-invbinomial(lrho="elogit", llambda="loge",
- erho=if(lrho=="elogit") list(min = 0.5, max = 1) else list(),
- elambda=list(), irho=NULL, ilambda=NULL, zero=NULL)
+invbinomial(lrho = elogit(min = 0.5, max = 1),
+ llambda = "loge", irho = NULL, ilambda = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +18,6 @@ invbinomial(lrho="elogit", llambda="loge",
See \code{\link{Links}} for more choices.
}
- \item{erho, elambda}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{irho, ilambda}{
Numeric.
Optional initial values for \eqn{\rho}{rho} and \eqn{\lambda}{lambda}.
@@ -56,12 +50,14 @@ invbinomial(lrho="elogit", llambda="loge",
It holds that \eqn{Var(Y) > E(Y)} so that the inverse binomial distribution
is overdispersed compared with the Poisson distribution.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
Yanagimoto, T. (1989)
@@ -69,15 +65,18 @@ invbinomial(lrho="elogit", llambda="loge",
\emph{Communications in Statistics: Theory and Methods},
\bold{18}, 3625--3633.
+
Jain, G. C. and Consul, P. C. (1971)
A generalized negative binomial distribution.
\emph{SIAM Journal on Applied Mathematics},
\bold{21}, 501--513.
+
Jorgensen, B. (1997)
\emph{The Theory of Dispersion Models}.
London: Chapman & Hall
+
}
\author{ T. W. Yee }
\note{
@@ -93,22 +92,24 @@ information matrix.
Yet to do: using the mean and the reciprocal of \eqn{\lambda}{lambda}
results in a EIM that is diagonal.
+
}
\seealso{
\code{\link{negbinomial}},
\code{\link{poissonff}}.
+
}
\examples{
-idata = data.frame(y = rnbinom(n <- 1000, mu=exp(3), size=exp(1)))
-fit <- vglm(y ~ 1, invbinomial, idata, trace=TRUE)
+idata <- data.frame(y = rnbinom(n <- 1000, mu = exp(3), size = exp(1)))
+fit <- vglm(y ~ 1, invbinomial, idata, trace = TRUE)
with(idata, c(mean(y), head(fitted(fit), 1)))
summary(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
-sum(weights(fit)) # sum of the prior weights
-sum(weights(fit, type="w")) # sum of the working weights
+sum(weights(fit)) # sum of the prior weights
+sum(weights(fit, type = "work")) # sum of the working weights
}
\keyword{models}
\keyword{regression}
diff --git a/man/invlomax.Rd b/man/invlomax.Rd
index 4df531e..22ba763 100644
--- a/man/invlomax.Rd
+++ b/man/invlomax.Rd
@@ -8,7 +8,6 @@
}
\usage{
invlomax(lscale = "loge", lshape2.p = "loge",
- escale = list(), eshape2.p = list(),
iscale = NULL, ishape2.p = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,11 +19,6 @@ invlomax(lscale = "loge", lshape2.p = "loge",
See \code{\link{Links}} for more choices.
}
- \item{escale, eshape2.p}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, ishape2.p}{
Optional initial values for \code{scale} and \code{p}.
@@ -97,10 +91,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-idata = data.frame(y = rinvlomax(n = 2000, exp(2), exp(1)))
-fit = vglm(y ~ 1, invlomax, idata, trace = TRUE)
-fit = vglm(y ~ 1, invlomax(iscale = exp(2), ishape2.p = exp(1)), idata,
- trace = TRUE, epsilon = 1e-8)
+idata <- data.frame(y = rinvlomax(n = 2000, exp(2), exp(1)))
+fit <- vglm(y ~ 1, invlomax, idata, trace = TRUE)
+fit <- vglm(y ~ 1, invlomax(iscale = exp(2), ishape2.p = exp(1)), idata,
+ trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/invparalogistic.Rd b/man/invparalogistic.Rd
index cc5fcb0..86587ab 100644
--- a/man/invparalogistic.Rd
+++ b/man/invparalogistic.Rd
@@ -8,7 +8,6 @@
}
\usage{
invparalogistic(lshape1.a = "loge", lscale = "loge",
- eshape1.a = list(), escale = list(),
ishape1.a = 2, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -20,11 +19,6 @@ invparalogistic(lshape1.a = "loge", lscale = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eshape1.a, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
@@ -97,10 +91,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-idata = data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
-fit = vglm(y ~ 1, invparalogistic, idata, trace = TRUE)
-fit = vglm(y ~ 1, invparalogistic(ishape1.a = 2.7, iscale = 7.3),
- idata, trace = TRUE, epsilon = 1e-8)
+idata <- data.frame(y = rinvparalogistic(n = 3000, exp(1), exp(2)))
+fit <- vglm(y ~ 1, invparalogistic, idata, trace = TRUE)
+fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.7, iscale = 7.3),
+ idata, trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/invparalogisticUC.Rd b/man/invparalogisticUC.Rd
index efe5ff1..b74c21b 100644
--- a/man/invparalogisticUC.Rd
+++ b/man/invparalogisticUC.Rd
@@ -66,9 +66,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-idata = data.frame(y = rinvparalogistic(n = 3000, 4, 6))
-fit = vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
- idata, trace = TRUE, crit = "coef")
+idata <- data.frame(y = rinvparalogistic(n = 3000, 4, 6))
+fit <- vglm(y ~ 1, invparalogistic(ishape1.a = 2.1),
+ idata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/is.parallel.Rd b/man/is.parallel.Rd
new file mode 100644
index 0000000..cbccf3d
--- /dev/null
+++ b/man/is.parallel.Rd
@@ -0,0 +1,69 @@
+\name{is.parallel}
+\alias{is.parallel}
+\alias{is.parallel.matrix}
+\alias{is.parallel.vglm}
+\title{Parallelism Constraint Matrices}
+\description{
+ Returns a logical vector from a test of whether an object such
+ as a matrix or VGLM object
+ corresponds to a parallelism assumption.
+
+}
+\usage{
+is.parallel.matrix(object, \dots)
+is.parallel.vglm(object, type = c("term", "lm"), \dots)
+}
+\arguments{
+ \item{object}{
+ an object such as a constraint matrix or
+ a \code{\link{vglm}} object.
+
+ }
+ \item{type}{
+ passed into \code{\link{constraints}}.
+
+ }
+ \item{\dots}{
+ additional optional arguments.
+ Currently unused.
+
+ }
+}
+\details{
+ These functions may be useful for categorical models
+ such as
+ \code{\link{propodds}},
+ \code{\link{cumulative}},
+ \code{\link{acat}},
+ \code{\link{cratio}},
+ \code{\link{sratio}},
+ \code{\link{multinomial}}.
+
+}
+\value{
+ A vector of logicals, testing whether each constraint matrix
+ is a one-column matrix of ones.
+ Note that parallelism can still be thought of as holding if
+ the constraint matrix has a non-zero but constant values, however,
+ this is currently not implemented.
+ No checking is done that the constraint matrices have the
+ same number of rows.
+
+
+}
+\seealso{
+ \code{\link{constraints}},
+ \code{\link{vglm}}.
+
+
+}
+
+
+\examples{
+fit <- vglm(educ ~ bs(age) * sex + ethnic, cumulative(parallel = TRUE), xs.nz)
+is.parallel(fit)
+is.parallel(fit, type = "lm") # For each column of the LM matrix
+}
+
+\keyword{models}
+\keyword{regression}
diff --git a/man/is.zero.Rd b/man/is.zero.Rd
new file mode 100644
index 0000000..685affa
--- /dev/null
+++ b/man/is.zero.Rd
@@ -0,0 +1,63 @@
+\name{is.zero}
+\alias{is.zero}
+\alias{is.zero.matrix}
+\alias{is.zero.vglm}
+\title{Zero Constraint Matrices}
+\description{
+ Returns a logical vector from a test of whether an object such
+ as a matrix or VGLM object
+ corresponds to a 'zero' assumption.
+
+}
+\usage{
+is.zero.matrix(object, \dots)
+is.zero.vglm(object, \dots)
+}
+\arguments{
+ \item{object}{
+ an object such as a coefficient matrix of a \code{\link{vglm}} object,
+ or a \code{\link{vglm}} object.
+
+ }
+ \item{\dots}{
+ additional optional arguments.
+ Currently unused.
+
+ }
+}
+\details{
+ These functions test the effect of the \code{zero} argument
+ on a \code{\link{vglm}} object or the coefficient matrix
+ of a \code{\link{vglm}} object.
+ The latter is obtained by \code{coef(vglmObject, matrix = TRUE)}.
+
+
+
+}
+\value{
+ A vector of logicals, testing whether each linear/additive predictor
+ has the \code{zero} argument applied to it.
+ It is \code{TRUE} if that linear/additive predictor is
+ intercept-only, i.e., all other regression coefficients
+ are set to zero.
+
+ No checking is done for the intercept term at all, i.e., that
+ it was estimated in the first place.
+
+
+}
+\seealso{
+ \code{\link{constraints}},
+ \code{\link{vglm}}.
+
+
+}
+
+\examples{
+fit <- vglm(cbind(cat, dog) ~ bs(age) * sex + ethnic, binom2.or, xs.nz)
+is.zero(fit)
+is.zero(coef(fit, matrix = TRUE))
+}
+
+\keyword{models}
+\keyword{regression}
diff --git a/man/koenker.Rd b/man/koenker.Rd
index a36ac08..5fe87f8 100644
--- a/man/koenker.Rd
+++ b/man/koenker.Rd
@@ -9,8 +9,7 @@
}
\usage{
koenker(percentile = 50, llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(), ilocation = NULL,
- iscale = NULL, imethod = 1, zero = 2)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,7 +19,7 @@ koenker(percentile = 50, llocation = "identity", lscale = "loge",
They will be returned as `fitted values'.
}
- \item{llocation, lscale, elocation, escale}{
+ \item{llocation, lscale}{
See \code{\link{Links}} for more choices,
and \code{\link{CommonVGAMffArguments}}.
@@ -94,22 +93,22 @@ When are expectiles percentiles? (solution)
}
\examples{
set.seed(123); nn <- 1000
-kdat <- data.frame(x2 = sort(runif(nn)))
-kdat <- transform(kdat, mylocat = 1 + 3 * x2,
+kdata <- data.frame(x2 = sort(runif(nn)))
+kdata <- transform(kdata, mylocat = 1 + 3 * x2,
myscale = 1)
-kdat <- transform(kdat, y = rkoenker(nn, loc = mylocat, scale = myscale))
-fit <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), kdat, trace = TRUE)
-fit2 <- vglm(y ~ x2, studentt2(df = 2), kdat, trace = TRUE) # 'same' as fit
+kdata <- transform(kdata, y = rkoenker(nn, loc = mylocat, scale = myscale))
+fit <- vglm(y ~ x2, koenker(perc = c(1, 50, 99)), kdata, trace = TRUE)
+fit2 <- vglm(y ~ x2, studentt2(df = 2), kdata, trace = TRUE) # 'same' as fit
coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
# Nice plot of the results
-\dontrun{ plot(y ~ x2, kdat, col = "blue", las = 1,
+\dontrun{ plot(y ~ x2, kdata, col = "blue", las = 1,
sub = paste("n =", nn),
main = "Fitted quantiles/expectiles using Koenker's distribution")
-matplot(with(kdat, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
+matplot(with(kdata, x2), fitted(fit), add = TRUE, type = "l", lwd = 3)
legend("bottomright", lty = 1:3, lwd = 3, legend = colnames(fitted(fit)),
col = 1:3) }
diff --git a/man/koenkerUC.Rd b/man/koenkerUC.Rd
index a2a3336..d8744e0 100644
--- a/man/koenkerUC.Rd
+++ b/man/koenkerUC.Rd
@@ -61,27 +61,27 @@ rkoenker(n, location = 0, scale = 1)
}
\examples{
-my_p = 0.25; y = rkoenker(nn <- 5000)
+my_p <- 0.25; y <- rkoenker(nn <- 5000)
(myexp = qkoenker(my_p))
-sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
+sum(myexp - y[y <= myexp]) / sum(abs(myexp - y)) # Should be my_p
# Equivalently:
-I1 = mean(y <= myexp) * mean( myexp - y[y <= myexp])
-I2 = mean(y > myexp) * mean(-myexp + y[y > myexp])
+I1 <- mean(y <= myexp) * mean( myexp - y[y <= myexp])
+I2 <- mean(y > myexp) * mean(-myexp + y[y > myexp])
I1 / (I1 + I2) # Should be my_p
# Or:
-I1 = sum( myexp - y[y <= myexp])
-I2 = sum(-myexp + y[y > myexp])
+I1 <- sum( myexp - y[y <= myexp])
+I2 <- sum(-myexp + y[y > myexp])
# Non-standard Koenker distribution
-myloc = 1; myscale = 2
-yy = rkoenker(nn, myloc, myscale)
-(myexp = qkoenker(my_p, myloc, myscale))
+myloc <- 1; myscale <- 2
+yy <- rkoenker(nn, myloc, myscale)
+(myexp <- qkoenker(my_p, myloc, myscale))
sum(myexp - yy[yy <= myexp]) / sum(abs(myexp - yy)) # Should be my_p
-pkoenker(mean(yy), myloc, myscale) # Should be 0.5
+pkoenker(mean(yy), myloc, myscale) # Should be 0.5
abs(qkoenker(0.5, myloc, myscale) - mean(yy)) # Should be 0
-abs(pkoenker(myexp, myloc, myscale) - my_p) # Should be 0
+abs(pkoenker(myexp, myloc, myscale) - my_p) # Should be 0
integrate(f = dkoenker, lower = -Inf, upper = Inf,
- locat = myloc, scale = myscale) # Should be 1
+ locat = myloc, scale = myscale) # Should be 1
y <- seq(-7, 7, len = 201)
max(abs(dkoenker(y) - dt(y / sqrt(2), df = 2) / sqrt(2))) # Should be 0
diff --git a/man/kumar.Rd b/man/kumar.Rd
index 6c0ece5..3e89a20 100644
--- a/man/kumar.Rd
+++ b/man/kumar.Rd
@@ -9,7 +9,6 @@
}
\usage{
kumar(lshape1 = "loge", lshape2 = "loge",
- eshape1 = list(), eshape2 = list(),
ishape1 = NULL, ishape2 = NULL, grid.shape1 = c(0.4, 6.0),
tol12 = 1.0e-4, zero = NULL)
@@ -22,11 +21,13 @@ kumar(lshape1 = "loge", lshape2 = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eshape1, eshape2}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{eshape1, eshape2}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% eshape1 = list(), eshape2 = list(),
+% }
+
\item{ishape1, ishape2}{
Numeric.
Optional initial values for the two positive shape parameters.
@@ -61,6 +62,8 @@ kumar(lshape1 = "loge", lshape2 = "loge",
Applications of the Kumaraswamy distribution include the storage
volume of a water reservoir.
Fisher scoring is implemented.
+ Handles multiple responses (matrix input).
+
}
diff --git a/man/kumarUC.Rd b/man/kumarUC.Rd
index 5b5caf9..568bedc 100644
--- a/man/kumarUC.Rd
+++ b/man/kumarUC.Rd
@@ -54,7 +54,7 @@ rkumar(n, shape1, shape2)
}
\examples{
\dontrun{
-shape1 <- 2; shape2 <- 2; nn <- 201; # shape1 = shape2 = 0.5;
+shape1 <- 2; shape2 <- 2; nn <- 201; # shape1 <- shape2 <- 0.5;
x <- seq(-0.05, 1.05, len = nn)
plot(x, dkumar(x, shape1, shape2), type = "l", las = 1, ylim = c(0,1.5),
ylab = paste("fkumar(shape1 = ", shape1, ", shape2 = ", shape2, ")"),
diff --git a/man/lambertW.Rd b/man/lambertW.Rd
index a0ce990..5d58ae0 100644
--- a/man/lambertW.Rd
+++ b/man/lambertW.Rd
@@ -6,6 +6,7 @@ The Lambert W function
}
\description{
Computes the Lambert \emph{W} function for real values.
+
}
\usage{
lambertW(x, tolerance = 1e-10, maxit = 50)
@@ -14,12 +15,15 @@ lambertW(x, tolerance = 1e-10, maxit = 50)
\arguments{
\item{x}{
A vector of reals.
+
}
\item{tolerance}{
Accuracy desired.
+
}
\item{maxit}{
Maximum number of iterations of third-order Halley's method.
+
}
}
\details{
@@ -31,6 +35,7 @@ Maximum number of iterations of third-order Halley's method.
possible real values, and currently only the upper branch
is computed.
+
}
\value{
This function returns the principal branch of the \eqn{W} function
@@ -38,6 +43,7 @@ Maximum number of iterations of third-order Halley's method.
It returns \eqn{W(z) \geq -1}{W(z) >= -1},
and \code{NA} for \eqn{z < -1/e}.
+
}
\references{
Corless, R. M. and Gonnet, G. H. and
@@ -46,6 +52,7 @@ On the Lambert \eqn{W} function.
\emph{Advances in Computational Mathematics},
\bold{5}(4), 329--359.
+
}
\author{
T. W. Yee
@@ -61,6 +68,7 @@ the lower branch for
real \eqn{-1/e \leq z < 0}{-1/e <= z < 0};
this would give \eqn{W(z) \leq -1}{W(z) <= -1}.
+
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
@@ -69,6 +77,7 @@ this would give \eqn{W(z) \leq -1}{W(z) <= -1}.
\code{\link[base:log]{log}},
\code{\link[base:log]{exp}}.
+
}
\examples{ \dontrun{
curve(lambertW, -exp(-1), 3, xlim = c(-1, 3), ylim = c(-2, 1), col = "red")
diff --git a/man/laplace.Rd b/man/laplace.Rd
index b39a348..04a8dde 100644
--- a/man/laplace.Rd
+++ b/man/laplace.Rd
@@ -8,9 +8,8 @@
}
\usage{
-laplace(llocation = "identity", lscale = "loge", elocation = list(),
- escale = list(), ilocation = NULL, iscale = NULL,
- imethod = 1, zero = 2)
+laplace(llocation = "identity", lscale = "loge",
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,25 +19,24 @@ laplace(llocation = "identity", lscale = "loge", elocation = list(),
See \code{\link{Links}} for more choices.
}
- \item{elocation, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation, iscale}{
Optional initial values.
If given, it must be numeric and values are recycled to the
appropriate length.
The default is to choose the value internally.
+
+
}
\item{imethod}{
Initialization method.
Either the value 1 or 2.
+
}
\item{zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -57,25 +55,32 @@ laplace(llocation = "identity", lscale = "loge", elocation = list(),
distribution} by Kotz et al. (2001), and the density is symmetric
about \eqn{a}.
+
For \code{y ~ 1} (where \code{y} is the response) the maximum likelihood
estimate (MLE) for the location parameter is the sample median, and
the MLE for \eqn{b} is \code{mean(abs(y-location))} (replace
location by its MLE if unknown).
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
\references{
+
Kotz, S., Kozubowski, T. J. and Podgorski, K. (2001)
\emph{The Laplace distribution and generalizations:
a revisit with applications to communications,
economics, engineering, and finance},
Boston: Birkhauser.
+
}
\author{ T. W. Yee }
\section{Warning}{
@@ -84,12 +89,14 @@ Boston: Birkhauser.
therefore misleading inferences may result,
e.g., in the \code{summary} and \code{vcov} of the object.
+
}
\note{
This family function uses Fisher scoring.
Convergence may be slow for non-intercept-only models;
half-stepping is frequently required.
+
}
\seealso{
@@ -98,18 +105,19 @@ Boston: Birkhauser.
\code{\link{exponential}},
\code{\link[stats]{median}}.
+
}
\examples{
-lddat = data.frame(y = rlaplace(nn <- 100, loc = 2, scale = exp(1)))
-fit = vglm(y ~ 1, laplace, lddat, trace = TRUE, crit = "l")
+ldata <- data.frame(y = rlaplace(nn <- 100, loc = 2, scale = exp(1)))
+fit <- vglm(y ~ 1, laplace, ldata, trace = TRUE, crit = "l")
coef(fit, matrix = TRUE)
Coef(fit)
-with(lddat, median(y))
+with(ldata, median(y))
-lddat = data.frame(x = runif(nn <- 1001))
-lddat = transform(lddat, y = rlaplace(nn, loc = 2, scale = exp(-1+1*x)))
-coef(vglm(y ~ x, laplace(iloc = .2, imethod = 2, zero = 1), lddat,
+ldata <- data.frame(x = runif(nn <- 1001))
+ldata <- transform(ldata, y = rlaplace(nn, loc = 2, scale = exp(-1+1*x)))
+coef(vglm(y ~ x, laplace(iloc = .2, imethod = 2, zero = 1), ldata,
trace = TRUE), matrix = TRUE)
}
\keyword{models}
diff --git a/man/laplaceUC.Rd b/man/laplaceUC.Rd
index 4cce574..0fbb855 100644
--- a/man/laplaceUC.Rd
+++ b/man/laplaceUC.Rd
@@ -77,8 +77,8 @@ New York: Wiley-Interscience, Third edition.
\code{\link{laplace}}.
}
\examples{
-loc = 1; b = 2
-y = rlaplace(n = 100, loc = loc, scale = b)
+loc <- 1; b <- 2
+y <- rlaplace(n = 100, loc = loc, scale = b)
mean(y) # sample mean
loc # population mean
var(y) # sample variance
diff --git a/man/leipnik.Rd b/man/leipnik.Rd
index a0c4fca..5eba793 100644
--- a/man/leipnik.Rd
+++ b/man/leipnik.Rd
@@ -8,8 +8,7 @@
}
\usage{
-leipnik(lmu = "logit", llambda = "loge", emu=list(),
- elambda=list(), imu = NULL, ilambda = NULL)
+leipnik(lmu = "logit", llambda = "loge", imu = NULL, ilambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,11 +22,6 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
\eqn{\lambda}{lambda}.
}
- \item{emu, elambda}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
}
\details{
The (transformed) Leipnik distribution has density function
@@ -49,6 +43,7 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
Leipnik distribution. Here, both \eqn{x} and \eqn{\theta}{theta}
are in \eqn{(-1,1)}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -56,12 +51,14 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Jorgensen, B. (1997)
\emph{The Theory of Dispersion Models}.
London: Chapman & Hall.
+
Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1995)
\emph{Continuous Univariate Distributions},
2nd edition,
@@ -69,6 +66,7 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
New York: Wiley.
(pages 612--617).
+
}
\author{ T. W. Yee }
\note{
@@ -79,6 +77,7 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
Currently, this family function probably only really works for
intercept-only models, i.e., \code{y ~ 1} in the formula.
+
}
\section{Warning }{
@@ -87,25 +86,27 @@ leipnik(lmu = "logit", llambda = "loge", emu=list(),
bounds. One way to stop this is to choose \code{llambda="loge"},
however, \code{lambda} is then constrained to be positive.
+
}
\seealso{
\code{\link{mccullagh89}}.
+
}
\examples{
-ldat = data.frame(y = rnorm(n=2000, mean=0.5, sd=0.1)) # Not good data
-fit = vglm(y ~ 1, leipnik(ilambda=1), ldat, tr=TRUE, checkwz=FALSE)
-fit = vglm(y ~ 1, leipnik(ilambda=1,llam=logoff, elam=list(offset=1)),
- ldat, trace=TRUE, cri="coef")
+ldata <- data.frame(y = rnorm(n = 2000, mean = 0.5, sd = 0.1)) # Not proper data
+fit <- vglm(y ~ 1, leipnik(ilambda = 1), ldata, trace = TRUE, checkwz = FALSE)
+fit <- vglm(y ~ 1, leipnik(ilambda = 1, llambda = logoff(offset = 1)),
+ ldata, trace = TRUE, crit = "coef")
head(fitted(fit))
-with(ldat, mean(y))
+with(ldata, mean(y))
summary(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
-sum(weights(fit)) # sum of the prior weights
-sum(weights(fit, type="w")) # sum of the working weights
+sum(weights(fit)) # sum of the prior weights
+sum(weights(fit, type = "w")) # sum of the working weights
}
\keyword{models}
\keyword{regression}
diff --git a/man/lerch.Rd b/man/lerch.Rd
index 13588ce..9bfb4ab 100644
--- a/man/lerch.Rd
+++ b/man/lerch.Rd
@@ -7,7 +7,7 @@
}
\usage{
-lerch(x, s, v, tolerance=1.0e-10, iter=100)
+lerch(x, s, v, tolerance = 1.0e-10, iter = 100)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -26,6 +26,7 @@ lerch(x, s, v, tolerance=1.0e-10, iter=100)
If \code{iter} is too small then a result of \code{NA} may occur;
if so, try increasing its value.
+
}
}
\details{
@@ -42,6 +43,7 @@ lerch(x, s, v, tolerance=1.0e-10, iter=100)
Phi(x,s,v) = x^m Phi(x,s,v+m) + sum_{n=0}^{m-1} x^n / (n+v)^s . }
See the URL below for more information.
This function is a wrapper function for the C code described below.
+
}
\value{
@@ -50,20 +52,24 @@ lerch(x, s, v, tolerance=1.0e-10, iter=100)
If the above ranges of \eqn{x} and \eqn{v} are not satisfied,
or some numeric problems occur, then
this function will return a \code{NA} for those values.
-
+
+
}
\references{
\url{http://aksenov.freeshell.org/lerchphi/source/lerchphi.c}.
+
Bateman, H. (1953)
\emph{Higher Transcendental Functions}.
Volume 1. McGraw-Hill, NY, USA.
+
}
\author{
S. V. Aksenov and U. D. Jentschura wrote the C code.
The R wrapper function was written by T. W. Yee.
+
}
\note{
There are a number of special cases, e.g.,
@@ -74,6 +80,7 @@ lerch(x, s, v, tolerance=1.0e-10, iter=100)
The Lerch transcendental Phi function should not be confused with the
Lerch zeta function though they are quite similar.
+
}
\section{Warning }{
This function has not been thoroughly tested and contains bugs,
@@ -84,21 +91,23 @@ lerch(x, s, v, tolerance=1.0e-10, iter=100)
and underflow, especially near singularities. If any problems occur
then a \code{NA} will be returned.
+
}
\seealso{
\code{\link{zeta}}.
+
}
\examples{
\dontrun{
-s=2; v=1; x = seq(-1.1, 1.1, len=201)
-plot(x, lerch(x, s=s, v=v), type="l", col="red", las=1,
- main=paste("lerch(x, s=",s,", v=",v,")",sep=""))
-abline(v=0, h=1, lty="dashed")
+s <- 2; v <- 1; x <- seq(-1.1, 1.1, length = 201)
+plot(x, lerch(x, s = s, v = v), type = "l", col = "blue", las = 1,
+ main = paste("lerch(x, s = ", s,", v =", v, ")", sep = ""))
+abline(v = 0, h = 1, lty = "dashed", col = "gray")
-s = rnorm(n=100)
-max(abs(zeta(s)-lerch(x=1,s=s,v=1))) # This fails (a bug); should be 0
+s <- rnorm(n = 100)
+max(abs(zeta(s) - lerch(x = 1, s = s, v = 1))) # This fails (a bug); should be 0
}
}
\keyword{math}
diff --git a/man/leukemia.Rd b/man/leukemia.Rd
index c59c23b..2438928 100644
--- a/man/leukemia.Rd
+++ b/man/leukemia.Rd
@@ -21,9 +21,13 @@ data(leukemia)
\emph{Survival Analysis}.
John Wiley & Sons.
ISBN: 0-471-25218-2.
+
+
}
\note{
This data set has been transferred from \pkg{survival} and renamed
from \code{aml} to \code{leukemia}.
+
+
}
\keyword{datasets}
diff --git a/man/levy.Rd b/man/levy.Rd
index 50449ee..18a453c 100644
--- a/man/levy.Rd
+++ b/man/levy.Rd
@@ -5,10 +5,10 @@
\description{
Estimates the two parameters of the Levy distribution
by maximum likelihood estimation.
+
}
\usage{
-levy(delta = NULL, link.gamma = "loge", earg=list(),
- idelta = NULL, igamma = NULL)
+levy(delta = NULL, link.gamma = "loge", idelta = NULL, igamma = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -16,15 +16,12 @@ levy(delta = NULL, link.gamma = "loge", earg=list(),
Location parameter. May be assigned a known value,
otherwise it is estimated (the default).
+
}
\item{link.gamma}{
Parameter link function for the (positive) \eqn{\gamma}{gamma} parameter.
See \code{\link{Links}} for more choices.
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{idelta}{
@@ -32,11 +29,13 @@ levy(delta = NULL, link.gamma = "loge", earg=list(),
(if it is to be estimated).
By default, an initial value is chosen internally.
+
}
\item{igamma}{
Initial value for the \eqn{\gamma}{gamma} parameter.
By default, an initial value is chosen internally.
+
}
}
\details{
@@ -52,23 +51,27 @@ levy(delta = NULL, link.gamma = "loge", earg=list(),
where \eqn{\delta<y<\infty}{delta<y<Inf} and \eqn{\gamma>0}{gamma>0}.
The mean does not exist.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Nolan, J. P. (2005)
\emph{Stable Distributions: Models for Heavy Tailed Data}.
+
}
\author{ T. W. Yee }
\note{
If \eqn{\delta}{delta} is given, then only one parameter is estimated
- and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}. If
- \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}.
+ and the default is \eqn{\eta_1=\log(\gamma)}{eta1=log(gamma)}.
+ If \eqn{\delta}{delta} is not given, then \eqn{\eta_2=\delta}{eta2=delta}.
+
}
@@ -78,23 +81,24 @@ levy(delta = NULL, link.gamma = "loge", earg=list(),
The Nolan article is at
\url{http://academic2.american.edu/~jpnolan/stable/chap1.pdf}.
+
}
\examples{
-nn = 1000; delta = 0
-mygamma = 1 # log link ==> 0 is the answer
-ldat = data.frame(y = delta + mygamma/rnorm(nn)^2) # Levy(mygamma, delta)
+nn <- 1000; delta <- 0
+mygamma <- 1 # log link ==> 0 is the answer
+ldata <- data.frame(y = delta + mygamma/rnorm(nn)^2) # Levy(mygamma, delta)
# Cf. Table 1.1 of Nolan for Levy(1,0)
-with(ldat, sum(y > 1) / length(y)) # Should be 0.6827
-with(ldat, sum(y > 2) / length(y)) # Should be 0.5205
+with(ldata, sum(y > 1) / length(y)) # Should be 0.6827
+with(ldata, sum(y > 2) / length(y)) # Should be 0.5205
-fit = vglm(y ~ 1, levy(delta=delta), ldat, trace=TRUE) # 1 parameter
-fit = vglm(y ~ 1, levy(idelta=delta, igamma=mygamma),
- ldat, trace=TRUE) # 2 parameters
-coef(fit, matrix=TRUE)
+fit <- vglm(y ~ 1, levy(delta = delta), ldata, trace = TRUE) # 1 parameter
+fit <- vglm(y ~ 1, levy(idelta = delta, igamma = mygamma),
+ ldata, trace = TRUE) # 2 parameters
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
-head(weights(fit, type="w"))
+head(weights(fit, type = "w"))
}
\keyword{models}
\keyword{regression}
diff --git a/man/lgammaUC.Rd b/man/lgammaUC.Rd
index e8242cf..99466a9 100644
--- a/man/lgammaUC.Rd
+++ b/man/lgammaUC.Rd
@@ -12,6 +12,7 @@
scale parameter \code{scale} and
shape parameter \code{k}.
+
}
\usage{
dlgamma(x, location = 0, scale = 1, k = 1, log = FALSE)
@@ -56,11 +57,13 @@ London: Imperial College Press.
\code{n}, all the above arguments may be vectors and are recyled to
the appropriate length if necessary.
+
}
\note{
The \pkg{VGAM} family function \code{\link{lgamma3ff}} is
for the three parameter (nonstandard) log-gamma distribution.
+
}
\seealso{
\code{\link{lgammaff}},
@@ -69,8 +72,8 @@ London: Imperial College Press.
}
\examples{
-\dontrun{ loc = 1; Scale = 1.5; k = 1.4
-x = seq(-3.2, 5, by = 0.01)
+\dontrun{ loc <- 1; Scale <- 1.5; k <- 1.4
+x <- seq(-3.2, 5, by = 0.01)
plot(x, dlgamma(x, loc, Scale, k), type = "l", col = "blue", ylim = 0:1,
main = "Blue is density, orange is cumulative distribution function",
sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
diff --git a/man/lgammaff.Rd b/man/lgammaff.Rd
index 46bb6a8..1cabedd 100644
--- a/man/lgammaff.Rd
+++ b/man/lgammaff.Rd
@@ -9,9 +9,8 @@
}
\usage{
-lgammaff(link = "loge", earg = list(), init.k = NULL)
+lgammaff(link = "loge", init.k = NULL)
lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
- elocation = list(), escale = list(), eshape = list(),
ilocation = NULL, iscale = NULL, ishape = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -29,11 +28,6 @@ lgamma3ff(llocation = "identity", lscale = "loge", lshape = "loge",
See \code{\link{Links}} for more choices.
}
- \item{earg, elocation, escale, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.k, ishape}{
Initial value for \eqn{k}.
If given, it must be positive.
@@ -120,16 +114,16 @@ New York: Wiley.
}
\examples{
-ldata = data.frame(y = rlgamma(100, k = exp(1)))
-fit = vglm(y ~ 1, lgammaff, ldata, trace = TRUE, crit = "coef")
+ldata <- data.frame(y = rlgamma(100, k = exp(1)))
+fit <- vglm(y ~ 1, lgammaff, ldata, trace = TRUE, crit = "coef")
summary(fit)
coef(fit, matrix = TRUE)
Coef(fit)
-ldata = data.frame(x = runif(nn <- 5000)) # Another example
-ldata = transform(ldata, loc = -1 + 2 * x, Scale = exp(1))
-ldata = transform(ldata, y = rlgamma(nn, loc, scale = Scale, k = exp(0)))
-fit2 = vglm(y ~ x, lgamma3ff(zero = 2:3), ldata, trace = TRUE, crit = "c")
+ldata <- data.frame(x = runif(nn <- 5000)) # Another example
+ldata <- transform(ldata, loc = -1 + 2 * x, Scale = exp(1))
+ldata <- transform(ldata, y = rlgamma(nn, loc, scale = Scale, k = exp(0)))
+fit2 <- vglm(y ~ x, lgamma3ff(zero = 2:3), ldata, trace = TRUE, crit = "c")
coef(fit2, matrix = TRUE)
}
\keyword{models}
diff --git a/man/lindUC.Rd b/man/lindUC.Rd
new file mode 100644
index 0000000..ec46d0c
--- /dev/null
+++ b/man/lindUC.Rd
@@ -0,0 +1,69 @@
+\name{Lindley}
+\alias{Lindley}
+\alias{dlind}
+\alias{plind}
+%\alias{qlind}
+\alias{rlind}
+\title{The Lindley Distribution}
+\description{
+ Density, cumulative distribution function,
+% quantile function
+ and
+ random generation for
+ the Lindley distribution.
+
+}
+\usage{
+dlind(x, theta, log = FALSE)
+plind(q, theta)
+%qlind(p, theta)
+rlind(n, theta)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+% \item{p}{vector of probabilities.}
+ \item{n}{number of observations. }
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{theta}{positive parameter. }
+
+}
+\value{
+ \code{dlind} gives the density,
+ \code{plind} gives the cumulative distribution function, and
+% \code{qlind} gives the quantile function, and
+ \code{rlind} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{lindley}} for details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{lindley}}.
+
+}
+\examples{
+theta <- exp(-1); x <- seq(0.0, 17, length = 700)
+dlind(0:10, theta)
+\dontrun{
+plot(x, dlind(x, theta), type = "l", las = 1, col = "blue",
+ main = "dlind(x, theta = exp(-1))")
+abline(h = 1, col = "grey", lty = "dashed") }
+}
+\keyword{distribution}
+
+
+% probs <- seq(0.01, 0.99, by = 0.01)
+% max(abs(plind(qlind(p = probs, theta), theta) - probs)) # Should be 0
+
+
+
diff --git a/man/lindley.Rd b/man/lindley.Rd
new file mode 100644
index 0000000..8b7696d
--- /dev/null
+++ b/man/lindley.Rd
@@ -0,0 +1,90 @@
+\name{lindley}
+\alias{lindley}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ 1-parameter Lindley Distribution }
+\description{
+ Estimates the (1-parameter) Lindley distribution
+ by maximum likelihood estimation.
+
+}
+\usage{
+lindley(link = "loge", itheta = NULL, zero = NULL)
+
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{link}{
+ Link function applied to the (positive) parameter.
+ See \code{\link{Links}} for more choices.
+
+ }
+
+% \item{earg}{
+% List. Extra argument for the link.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+ \item{itheta, zero}{
+ See \code{\link{CommonVGAMffArguments}} for information.
+
+ }
+}
+\details{
+ The density function is given by
+ \deqn{f(y; \theta) = \theta^2 (1 + y) \exp(-\theta y) / (1 + \theta)}{%
+ f(y; theta) = theta^2 * (1 + y) * exp(-theta * y) / (1 + theta)}
+ for \eqn{\theta > 0}{theta > 0} and \eqn{y > 0}.
+ The mean of \eqn{Y} (returned as the fitted values)
+ is \eqn{\mu = (\theta + 2) / (\theta (\theta + 1))}{mu = (theta + 2) / (theta * (theta + 1))}.
+ The variance
+ is \eqn{(\theta^2 + 4 \theta + 2) / (\theta (\theta + 1))^2}{(theta^2 + 4 * theta + 2) / (theta * (theta + 1))^2}.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}}
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+Lindley, D. V. (1958)
+Fiducial distributions and Bayes' theorem.
+\emph{Journal of the Royal Statistical Society, Series B, Methodological},
+\bold{20}, 102--107.
+
+
+Ghitany, M. E. and Atieh, B. and Nadarajah, S. (2008)
+Lindley distribution and its application.
+\emph{Math. Comput. Simul.},
+\bold{78}, 493--506.
+
+
+}
+\author{ T. W. Yee }
+\note{
+ This \pkg{VGAM} family function can handle multiple
+ responses (inputted as a matrix).
+ Fisher scoring is implemented.
+
+
+}
+
+\seealso{
+ \code{\link{dlind}},
+ \code{\link{gamma2.ab}},
+
+
+}
+\examples{
+ldata <- data.frame(y = rlind(n = 1000, theta = exp(3)))
+fit <- vglm(y ~ 1, lindley, ldata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
+Coef(fit)
+summary(fit)
+}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/lino.Rd b/man/lino.Rd
index 339e502..5f6bf30 100644
--- a/man/lino.Rd
+++ b/man/lino.Rd
@@ -9,7 +9,6 @@
}
\usage{
lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
- eshape1 = list(), eshape2 = list(), elambda = list(),
ishape1 = NULL, ishape2 = NULL, ilambda = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -26,11 +25,6 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eshape1, eshape2, elambda}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1, ishape2, ilambda}{
Initial values for the parameters. A \code{NULL} value means
one is computed internally. The argument \code{ilambda} must
@@ -120,17 +114,17 @@ lino(lshape1 = "loge", lshape2 = "loge", llambda = "loge",
}
\examples{
-ldat1 = data.frame(y = rbeta(n=1000, exp(0.5), exp(1))) # ~ standard beta
-fit = vglm(y ~ 1, lino, ldat1, trace=TRUE)
-coef(fit, mat=TRUE)
+ldata1 <- data.frame(y = rbeta(n = 1000, exp(0.5), exp(1))) # ~ standard beta
+fit <- vglm(y ~ 1, lino, ldata1, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
summary(fit)
# Nonstandard beta distribution
-ldat2 = data.frame(y = rlino(n=1000, shape1=2, shape2=3, lambda=exp(1)))
-fit = vglm(y~1, lino(lshape1=identity, lshape2=identity, ilamb=10), ldat2)
-coef(fit, mat=TRUE)
+ldata2 <- data.frame(y = rlino(n = 1000, shape1 = 2, shape2 = 3, lambda = exp(1)))
+fit <- vglm(y~1, lino(lshape1 = identity, lshape2 = identity, ilamb = 10), ldata2)
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/linoUC.Rd b/man/linoUC.Rd
index 324555e..3ae7215 100644
--- a/man/linoUC.Rd
+++ b/man/linoUC.Rd
@@ -9,12 +9,13 @@
Density, distribution function, quantile function and random
generation for the generalized beta distribution, as proposed
by Libby and Novick (1982).
+
}
\usage{
-dlino(x, shape1, shape2, lambda=1, log=FALSE)
-plino(q, shape1, shape2, lambda=1)
-qlino(p, shape1, shape2, lambda=1)
-rlino(n, shape1, shape2, lambda=1)
+dlino(x, shape1, shape2, lambda = 1, log = FALSE)
+plino(q, shape1, shape2, lambda = 1)
+qlino(p, shape1, shape2, lambda = 1)
+rlino(n, shape1, shape2, lambda = 1)
}
\arguments{
\item{x, q}{vector of quantiles.}
@@ -24,7 +25,7 @@ rlino(n, shape1, shape2, lambda=1)
\item{shape1, shape2, lambda}{ see \code{\link{lino}}. }
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
}
@@ -33,6 +34,8 @@ rlino(n, shape1, shape2, lambda=1)
\code{plino} gives the distribution function,
\code{qlino} gives the quantile function, and
\code{rlino} generates random deviates.
+
+
}
%\references{
% Libby, D. L. and Novick, M. R. (1982)
@@ -46,33 +49,41 @@ rlino(n, shape1, shape2, lambda=1)
% NY: Marcel Dekker, Inc.
%
%}
+
+
\author{ T. W. Yee }
\details{
See \code{\link{lino}}, the \pkg{VGAM} family function
for estimating the parameters,
for the formula of the probability density function and other details.
+
+
}
%\note{
-%
+%
%}
+
+
\seealso{
\code{\link{lino}}.
+
+
}
\examples{
\dontrun{
-lambda = 0.4; shape1 = exp(1.3); shape2 = exp(1.3)
-x = seq(0.0, 1.0, len=101)
-plot(x, dlino(x, shape1=shape1, shape2=shape2, lambda=lambda),
- type="l", col="blue", las=1, ylab="",
- main="Blue is density, red is cumulative distribution function",
- sub="Purple lines are the 10,20,...,90 percentiles")
-abline(h=0, col="blue", lty=2)
-lines(x, plino(x, shape1=shape1, shape2=shape2, l=lambda), col="red")
-probs = seq(0.1, 0.9, by=0.1)
-Q = qlino(probs, shape1=shape1, shape2=shape2, lambda=lambda)
-lines(Q, dlino(Q, shape1=shape1, shape2=shape2, lambda=lambda),
- col="purple", lty=3, type="h")
-plino(Q, shape1=shape1, shape2=shape2, l=lambda) - probs # Should be all 0
+lambda <- 0.4; shape1 <- exp(1.3); shape2 <- exp(1.3)
+x <- seq(0.0, 1.0, len = 101)
+plot(x, dlino(x, shape1 = shape1, shape2 = shape2, lambda = lambda),
+ type = "l", col = "blue", las = 1, ylab = "",
+ main = "Blue is density, red is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles")
+abline(h = 0, col = "blue", lty = 2)
+lines(x, plino(x, shape1 = shape1, shape2 = shape2, l = lambda), col = "red")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qlino(probs, shape1 = shape1, shape2 = shape2, lambda = lambda)
+lines(Q, dlino(Q, shape1 = shape1, shape2 = shape2, lambda = lambda),
+ col = "purple", lty = 3, type = "h")
+plino(Q, shape1 = shape1, shape2 = shape2, l = lambda) - probs # Should be all 0
}
}
\keyword{distribution}
diff --git a/man/lirat.Rd b/man/lirat.Rd
index df616e0..27a41b6 100644
--- a/man/lirat.Rd
+++ b/man/lirat.Rd
@@ -4,6 +4,8 @@
\title{ Low-iron Rat Teratology Data }
\description{
Low-iron rat teratology data.
+
+
}
\usage{data(lirat)}
\format{
@@ -51,11 +53,15 @@ are accounted for.
Extra-binomial and Extra-Poisson Variation.
\emph{Biometrics},
\bold{47}, 383--401.
+
+
}
\references{
Shepard, T. H., Mackler, B. and Finch, C. A. (1980)
Reproductive studies in the iron-deficient rat.
\emph{Teratology}, \bold{22}, 329--334.
+
+
}
\examples{
\dontrun{
diff --git a/man/lms.bcg.Rd b/man/lms.bcg.Rd
index 14eda24..5a6c690 100644
--- a/man/lms.bcg.Rd
+++ b/man/lms.bcg.Rd
@@ -9,7 +9,6 @@
\usage{
lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
llambda = "identity", lmu = "identity", lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
dfmu.init = 4, dfsigma.init = 2, ilambda = 1, isigma = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -30,10 +29,6 @@ lms.bcg(percentiles = c(25, 50, 75), zero = c(1, 3),
See \code{\link{lms.bcn}}.
}
- \item{elambda, emu, esigma}{
- See \code{\link{lms.bcn}}.
-
- }
\item{dfmu.init, dfsigma.init}{
See \code{\link{lms.bcn}}.
@@ -96,11 +91,12 @@ contains further information and examples.
\code{\link{bmi.nz}},
\code{\link{amlexponential}}.
+
}
\examples{
# This converges, but deplot(fit) and qtplot(fit) do not work
-fit0 = vglm(BMI ~ bs(age, df = 4), lms.bcg, bmi.nz, trace = TRUE)
+fit0 <- vglm(BMI ~ bs(age, df = 4), lms.bcg, bmi.nz, trace = TRUE)
coef(fit0, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 1))
@@ -108,13 +104,13 @@ plotvgam(fit0, se = TRUE) # Plot mu function (only)
}
# Use a trick: fit0 is used for initial values for fit1.
-fit1 = vgam(BMI ~ s(age, df = c(4, 2)), etastart = predict(fit0),
- lms.bcg(zero = 1), bmi.nz, trace = TRUE)
+fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), etastart = predict(fit0),
+ lms.bcg(zero = 1), bmi.nz, trace = TRUE)
# Difficult to get a model that converges.
# Here, we prematurely stop iterations because it fails near the solution.
-fit2 = vgam(BMI ~ s(age, df = c(4, 2)), maxit = 4,
- lms.bcg(zero = 1, ilam = 3), bmi.nz, trace = TRUE)
+fit2 <- vgam(BMI ~ s(age, df = c(4, 2)), maxit = 4,
+ lms.bcg(zero = 1, ilam = 3), bmi.nz, trace = TRUE)
summary(fit1)
head(predict(fit1))
head(fitted(fit1))
@@ -129,12 +125,12 @@ qtplot(fit1, percentiles=c(5, 50, 90, 99), main = "Quantiles",
xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
# Density plot
-ygrid = seq(15, 43, len = 100) # BMI ranges
+ygrid <- seq(15, 43, len = 100) # BMI ranges
par(mfrow = c(1, 1), lwd = 2)
-(aa = deplot(fit1, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
+(aa <- deplot(fit1, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
-aa = deplot(fit1, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
-aa = deplot(fit1, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+aa <- deplot(fit1, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
+aa <- deplot(fit1, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
aa at post$deplot # Contains density function values
}
}
diff --git a/man/lms.bcn.Rd b/man/lms.bcn.Rd
index 8e3f566..ee7a196 100644
--- a/man/lms.bcn.Rd
+++ b/man/lms.bcn.Rd
@@ -1,16 +1,15 @@
\name{lms.bcn}
\alias{lms.bcn}
%- Also NEED an '\alias' for EACH other topic documented here.
-\title{ LMS Quantile/Expectile Regression with a Box-Cox Transformation to Normality }
+\title{ LMS Quantile Regression with a Box-Cox Transformation to Normality }
\description{
- LMS quantile/expectile regression with the Box-Cox transformation
+ LMS quantile regression with the Box-Cox transformation
to normality.
}
\usage{
lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
llambda = "identity", lmu = "identity", lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
dfmu.init = 4, dfsigma.init = 2, ilambda = 1,
isigma = NULL, expectiles = FALSE)
}
@@ -19,9 +18,12 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
\item{percentiles}{
A numerical vector containing values between 0 and 100,
- which are the quantiles or expectiles.
+ which are the quantiles.
They will be returned as `fitted values'.
+% or expectiles.
+
+
}
\item{zero}{
An integer-valued vector specifying which
@@ -32,6 +34,7 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
functions of the covariates.
For more information see \code{\link{CommonVGAMffArguments}}.
+
}
\item{llambda, lmu, lsigma}{
Parameter link functions applied to the first, second and third
@@ -39,11 +42,6 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
See \code{\link{Links}} for more choices,
and \code{\link{CommonVGAMffArguments}}.
- }
- \item{elambda, emu, esigma}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information,
- as well as \code{\link{CommonVGAMffArguments}}.
}
\item{dfmu.init}{
@@ -51,6 +49,7 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
get an initial estimate of mu.
See \code{\link{vsmooth.spline}}.
+
}
\item{dfsigma.init}{
Degrees of freedom for the cubic smoothing spline fit applied to
@@ -59,12 +58,14 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
This argument may be assigned \code{NULL} to get an initial value
using some other algorithm.
+
}
\item{ilambda}{
Initial value for lambda.
If necessary, it is recycled to be a vector of length \eqn{n}
where \eqn{n} is the number of (independent) observations.
+
}
\item{isigma}{
Optional initial value for sigma.
@@ -72,11 +73,15 @@ lms.bcn(percentiles = c(25, 50, 75), zero = c(1, 3),
The default value, \code{NULL}, means an initial value is computed
in the \code{@initialize} slot of the family function.
+
}
\item{expectiles}{
- A single logical. If \code{TRUE} then the method is LMS-expectile
- regression; \emph{expectiles} are returned rather than quantiles.
- The default is LMS quantile regression based on the normal distribution.
+ Experimental; please do not use.
+
+% A single logical. If \code{TRUE} then the method is LMS-expectile
+% regression; \emph{expectiles} are returned rather than quantiles.
+% The default is LMS quantile regression based on the normal distribution.
+
}
@@ -176,8 +181,8 @@ contains further information and examples.
and negative values.
- LMS-BCN expectile regression is a \emph{new} methodology proposed
- by myself!
+% LMS-BCN expectile regression is a \emph{new} methodology proposed
+% by myself!
In general, the lambda and sigma functions should be more smoother
@@ -197,7 +202,10 @@ contains further information and examples.
\section{Warning }{
The computations are not simple, therefore convergence may fail.
- In that case, try different starting values.
+ Set \code{trace = TRUE} to monitor convergence if it isn't set already.
+ Convergence failure will occur if, e.g., the response is bimodal
+ at any particular value of \eqn{x}.
+ In case of convergence failure, try different starting values.
Also, the estimate may diverge quickly near the solution,
in which case try prematurely
stopping the iterations by assigning \code{maxits} to be the iteration
@@ -225,25 +233,25 @@ contains further information and examples.
}
\examples{
-mysubset = subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
-mysubset = transform(mysubset, BMI = weight / height^2)
-BMIdata = mysubset[, c("age", "BMI")]
-BMIdata = na.omit(BMIdata)
-BMIdata = subset(BMIdata, BMI < 80 & age < 65) # Delete an outlier
+mysubset <- subset(xs.nz, sex == "M" & ethnic == "1" & Study1)
+mysubset <- transform(mysubset, BMI = weight / height^2)
+BMIdata <- mysubset[, c("age", "BMI")]
+BMIdata <- na.omit(BMIdata)
+BMIdata <- subset(BMIdata, BMI < 80 & age < 65) # Delete an outlier
summary(BMIdata)
-fit = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata, trace = TRUE)
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata)
head(predict(fit))
head(fitted(fit))
head(BMIdata)
head(cdf(fit)) # Person 56 is probably overweight, given his age
-colMeans(c(depvar(fit)) < fitted(fit)) # Sample proportions below the quantiles
+100 * colMeans(c(depvar(fit)) < fitted(fit)) # Empirical proportions
# Convergence problems? Try this trick: fit0 is a simpler model used for fit1
-fit0 = vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), BMIdata, trace = TRUE)
-fit1 = vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata,
- etastart = predict(fit0), trace = TRUE)
+fit0 <- vgam(BMI ~ s(age, df = 4), lms.bcn(zero = c(1,3)), BMIdata)
+fit1 <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero = 1), BMIdata,
+ etastart = predict(fit0))
\dontrun{
# Quantile plot
@@ -252,13 +260,13 @@ qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles",
xlim = c(15, 66), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
# Density plot
-ygrid = seq(15, 43, len = 100) # BMI ranges
-par(mfrow=c(1, 1), lwd = 2)
-(aa = deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
+ygrid <- seq(15, 43, len = 100) # BMI ranges
+par(mfrow = c(1, 1), lwd = 2)
+(aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
-aa = deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
-aa = deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
- Attach = TRUE)
+aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
+aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue",
+ Attach = TRUE)
aa at post$deplot # Contains density function values
}
}
diff --git a/man/lms.yjn.Rd b/man/lms.yjn.Rd
index 5a9535d..40ea48c 100644
--- a/man/lms.yjn.Rd
+++ b/man/lms.yjn.Rd
@@ -9,13 +9,12 @@
}
\usage{
lms.yjn(percentiles = c(25, 50, 75), zero = c(1,3),
- llambda = "identity", lsigma = "loge", elambda = list(),
- esigma = list(), dfmu.init = 4, dfsigma.init = 2,
+ llambda = "identity", lsigma = "loge",
+ dfmu.init = 4, dfsigma.init = 2,
ilambda = 1, isigma = NULL, rule = c(10, 5),
yoffset = NULL, diagW = FALSE, iters.diagW = 6)
lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
llambda = "identity", lmu = "identity", lsigma = "loge",
- elambda = list(), emu = list(), esigma = list(),
dfmu.init = 4, dfsigma.init = 2, ilambda = 1.0,
isigma = NULL, yoffset = NULL, nsimEIM = 250)
}
@@ -35,10 +34,6 @@ lms.yjn2(percentiles=c(25,50,75), zero=c(1,3),
See \code{\link{lms.bcn}}.
}
- \item{elambda, emu, esigma}{
- See \code{\link{lms.bcn}}.
-
- }
\item{dfmu.init, dfsigma.init}{
See \code{\link{lms.bcn}}.
@@ -158,7 +153,7 @@ The generic function \code{predict}, when applied to a
}
\examples{
-fit = vgam(BMI ~ s(age, df = 4), lms.yjn, bmi.nz, trace = TRUE)
+fit <- vgam(BMI ~ s(age, df = 4), lms.yjn, bmi.nz, trace = TRUE)
head(predict(fit))
head(fitted(fit))
head(bmi.nz)
@@ -172,12 +167,12 @@ qtplot(fit, percentiles = c(5, 50, 90, 99), main = "Quantiles",
xlim = c(15, 90), las = 1, ylab = "BMI", lwd = 2, lcol = 4)
# Density plot
-ygrid = seq(15, 43, len=100) # BMI ranges
-par(mfrow=c(1,1), lwd=2)
-(aa = deplot(fit, x0=20, y=ygrid, xlab="BMI", col="black",
- main="Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
-aa = deplot(fit, x0=42, y=ygrid, add=TRUE, llty=2, col="red")
-aa = deplot(fit, x0=55, y=ygrid, add=TRUE, llty=4, col="blue", Attach=TRUE)
+ygrid <- seq(15, 43, len = 100) # BMI ranges
+par(mfrow = c(1, 1), lwd = 2)
+(aa <- deplot(fit, x0 = 20, y = ygrid, xlab = "BMI", col = "black",
+ main = "Density functions at Age = 20 (black), 42 (red) and 55 (blue)"))
+aa <- deplot(fit, x0 = 42, y = ygrid, add = TRUE, llty = 2, col = "red")
+aa <- deplot(fit, x0 = 55, y = ygrid, add = TRUE, llty = 4, col = "blue", Attach = TRUE)
with(aa at post, deplot) # Contains density function values; == a at post$deplot
}
}
diff --git a/man/logUC.Rd b/man/logUC.Rd
index 01c8737..251a6f9 100644
--- a/man/logUC.Rd
+++ b/man/logUC.Rd
@@ -70,16 +70,19 @@ New York: Wiley-Interscience, Third edition.
Very large values in \code{q} are handled by an approximation by
Owen (1965).
+
}
\seealso{
- \code{\link{logff}}.
+ \code{\link{logff}}.
+
+
}
\examples{
dlog(1:20, 0.5)
rlog(20, 0.5)
-\dontrun{ prob = 0.8; x = 1:10
+\dontrun{ prob <- 0.8; x <- 1:10
plot(x, dlog(x, prob = prob), type = "h", ylim = 0:1,
sub = "prob=0.8", las = 1, col = "blue", ylab = "Probability",
main="Logarithmic distribution: blue=density; red=distribution function")
diff --git a/man/logc.Rd b/man/logc.Rd
index 675472d..767fcf5 100644
--- a/man/logc.Rd
+++ b/man/logc.Rd
@@ -8,7 +8,7 @@
}
\usage{
-logc(theta, earg = list(), inverse = FALSE, deriv = 0,
+logc(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,22 +18,18 @@ logc(theta, earg = list(), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 1 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}}.
+
+
+ }
+
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
}
- \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
- \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
- \item{short}{ Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object. }
- \item{tag}{ Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}. }
+
}
\details{
The complementary-log link function is suitable for parameters that
@@ -41,8 +37,8 @@ logc(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical values of \code{theta} close to 1 or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
+
}
\value{
@@ -51,23 +47,30 @@ logc(theta, earg = list(), inverse = FALSE, deriv = 0,
and if \code{inverse = TRUE} then
\code{1-exp(theta)}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
+
}
\author{ Thomas W. Yee }
\note{
Numerical instability may occur when \code{theta} is close to 1.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use \code{bvalue}.
+
}
@@ -78,12 +81,13 @@ logc(theta, earg = list(), inverse = FALSE, deriv = 0,
\code{\link{loglog}},
\code{\link{logoff}}.
+
}
\examples{
\dontrun{
-logc(seq(-0.2, 1.1, by=0.1)) # Has NAs
+logc(seq(-0.2, 1.1, by = 0.1)) # Has NAs
}
-logc(seq(-0.2, 1.1, by=0.1), earg=list(bval=1-.Machine$double.eps)) # Has no NAs
+logc(seq(-0.2, 1.1, by = 0.1), bvalue = 1 - .Machine$double.eps) # Has no NAs
}
\keyword{math}
\keyword{models}
diff --git a/man/loge.Rd b/man/loge.Rd
index f406851..cf09998 100644
--- a/man/loge.Rd
+++ b/man/loge.Rd
@@ -9,9 +9,9 @@
}
\usage{
-loge(theta, earg = list(), inverse = FALSE, deriv = 0,
+loge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
-nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
+nloge(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -22,30 +22,18 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}}.
}
- \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
- \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
}
+
}
\details{
The log link function is very commonly used for parameters that
@@ -53,8 +41,7 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical values of \code{theta} close to 0 or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
The function \code{loge} computes
@@ -92,7 +79,7 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical instability may occur when \code{theta} is close to 0 unless
- \code{earg} is used.
+ \code{bvalue} is used.
}
@@ -111,9 +98,9 @@ nloge(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\examples{
\dontrun{ loge(seq(-0.2, 0.5, by = 0.1))
- loge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin))
+ loge(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin)
nloge(seq(-0.2, 0.5, by = 0.1))
-nloge(seq(-0.2, 0.5, by = 0.1), earg = list(bvalue = .Machine$double.xmin)) }
+nloge(seq(-0.2, 0.5, by = 0.1), bvalue = .Machine$double.xmin) }
}
\keyword{math}
\keyword{models}
diff --git a/man/logff.Rd b/man/logff.Rd
index c4a5488..ebd862c 100644
--- a/man/logff.Rd
+++ b/man/logff.Rd
@@ -7,19 +7,14 @@
}
\usage{
-logff(link = "logit", earg=list(), init.c = NULL)
+logff(link = "logit", init.c = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{link}{
- Parameter link function applied to the parameter \eqn{c},
+ Parameter link function for the parameter \eqn{c},
which lies between 0 and 1.
- See \code{\link{Links}} for more choices.
-
- }
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
+ See \code{\link{Links}} for more choices and information.
}
\item{init.c}{
@@ -28,6 +23,10 @@ logff(link = "logit", earg=list(), init.c = NULL)
The default is to choose an initial value internally.
}
+ \item{zero}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+ }
}
\details{
The logarithmic distribution is based on the logarithmic series,
@@ -71,6 +70,10 @@ New York: Wiley-Interscience, Third edition.
\code{\link{loge}} corresponds to this.
+ Multiple responses are permitted.
+
+
+ The logarithmic distribution is sometimes confused with the log-series
The logarithmic distribution is sometimes confused with the log-series
distribution. The latter was used by Fisher et al. for species abundance
data, and has two parameters.
@@ -88,25 +91,25 @@ New York: Wiley-Interscience, Third edition.
}
\examples{
-ldata = data.frame(y = rlog(n = 1000, prob = logit(0.2, inverse = TRUE)))
-fit = vglm(y ~ 1, logff, ldata, trace = TRUE, crit = "c")
+ldata <- data.frame(y = rlog(n = 1000, prob = logit(0.2, inverse = TRUE)))
+fit <- vglm(y ~ 1, logff, ldata, trace = TRUE, crit = "c")
coef(fit, matrix = TRUE)
Coef(fit)
\dontrun{with(ldata,
hist(y, prob = TRUE, breaks = seq(0.5, max(y) + 0.5, by = 1),
border = "blue"))
-x = seq(1, with(ldata, max(y)), by=1)
+x <- seq(1, with(ldata, max(y)), by = 1)
with(ldata, lines(x, dlog(x, Coef(fit)[1]), col = "orange", type = "h", lwd = 2)) }
# Example: Corbet (1943) butterfly Malaya data
-corbet = data.frame(nindiv = 1:24,
- ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12,
- 14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
-fit = vglm(nindiv ~ 1, logff, data = corbet, weights = ofreq)
+corbet <- data.frame(nindiv = 1:24,
+ ofreq = c(118, 74, 44, 24, 29, 22, 20, 19, 20, 15, 12,
+ 14, 6, 12, 6, 9, 9, 6, 10, 10, 11, 5, 3, 3))
+fit <- vglm(nindiv ~ 1, logff, data = corbet, weights = ofreq)
coef(fit, matrix = TRUE)
-chat = Coef(fit)["c"]
-pdf2 = dlog(x = with(corbet, nindiv), prob = chat)
+chat <- Coef(fit)["c"]
+pdf2 <- dlog(x = with(corbet, nindiv), prob = chat)
print(with(corbet, cbind(nindiv, ofreq, fitted = pdf2 * sum(ofreq))), dig = 1)
}
\keyword{models}
diff --git a/man/logistic.Rd b/man/logistic.Rd
index fde3f27..43dba2a 100644
--- a/man/logistic.Rd
+++ b/man/logistic.Rd
@@ -10,11 +10,9 @@
}
\usage{
-logistic1(llocation = "identity", elocation = list(),
- scale.arg = 1, imethod = 1)
+logistic1(llocation = "identity", scale.arg = 1, imethod = 1)
logistic2(llocation = "identity", lscale = "loge",
- elocation = list(), escale = list(),
- ilocation = NULL, iscale = NULL, imethod = 1, zero = NULL)
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -25,11 +23,6 @@ logistic2(llocation = "identity", lscale = "loge",
\code{\link{CommonVGAMffArguments}} for more information.
}
- \item{elocation, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{scale.arg}{
Known positive scale parameter (called \eqn{s} below).
@@ -72,6 +65,9 @@ logistic2(llocation = "identity", lscale = "loge",
\code{logistic2}.
+ \code{logistic2} can handle multiple responses.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -120,19 +116,18 @@ A note on Deriving the Information Matrix for a Logistic Distribution,
}
\examples{
-# location unknown, scale known
-ldat1 = data.frame(x = runif(nn <- 500))
-ldat1 = transform(ldat1, y = rlogis(nn, loc = 1+5*x, scale = 4))
-fit = vglm(y ~ x, logistic1(scale = 4), ldat1, trace = TRUE, crit = "c")
-coef(fit, matrix = TRUE)
+# Location unknown, scale known
+ldata <- data.frame(x2 = runif(nn <- 500))
+ldata <- transform(ldata, y1 = rlogis(nn, loc = 1+5*x2, scale = exp(2)))
+fit1 <- vglm(y1 ~ x2, logistic1(scale = 4), ldata, trace = TRUE, crit = "c")
+coef(fit1, matrix = TRUE)
# Both location and scale unknown
-ldat2 = data.frame(x = runif(nn <- 2000))
-ldat2 = transform(ldat2, y = rlogis(nn, loc = 1+5*x, scale = exp(0+1*x)))
-fit = vglm(y ~ x, logistic2, ldat2)
-coef(fit, matrix = TRUE)
-vcov(fit)
-summary(fit)
+ldata <- transform(ldata, y2 = rlogis(nn, loc = 1+5*x2, scale = exp(0+1*x2)))
+fit2 <- vglm(cbind(y1, y2) ~ x2, logistic2, ldata, trace = TRUE)
+coef(fit2, matrix = TRUE)
+vcov(fit2)
+summary(fit2)
}
\keyword{models}
\keyword{regression}
diff --git a/man/logit.Rd b/man/logit.Rd
index d3f058d..94d5198 100644
--- a/man/logit.Rd
+++ b/man/logit.Rd
@@ -9,10 +9,10 @@
}
\usage{
-logit(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
+logit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
+elogit(theta, min = 0, max = 1, bminvalue = NULL, bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,53 +21,35 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
- replaced by 1 minus the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
- Similarly, for \code{elogit}, values of \code{theta} less than or equal
- to \eqn{A} or greater than or equal to \eqn{B} can be replaced
- by the \code{bminvalue} and \code{bmaxvalue} components of the list \code{earg}.
+ \item{bvalue, bminvalue, bmaxvalue}{
+ See \code{\link{Links}}.
+ These are boundary values.
+ For \code{elogit}, values of \code{theta} less than or equal
+ to \eqn{A} or greater than or equal to \eqn{B} can be replaced
+ by \code{bminvalue} and \code{bmaxvalue}.
+
+
+ }
% Extra argument for passing in additional information.
% For \code{logit}, values of \code{theta} which are equal to 0 or 1 are
% replaced by \code{earg} or \code{1-earg}
% (respectively, and if given) before computing the logit.
- For \code{elogit}, \code{earg} should be a list with components
- \code{min} giving \eqn{A},
- \code{max} giving \eqn{B}, and for out of range values,
+ \item{min, max}{
+ For \code{elogit},
+ \code{min} gives \eqn{A},
+ \code{max} gives \eqn{B}, and for out of range values,
\code{bminvalue} and \code{bmaxvalue}.
- If \code{earg} is used, these
- component names should not be abbreviated.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
- The inverse logit function is known as the \emph{expit} function.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
The logit link function is very commonly used for parameters that
@@ -76,6 +58,7 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+
The \emph{extended} logit link function \code{elogit} should be used
more generally for parameters that lie in the interval \eqn{(A,B)}, say.
The formula is
@@ -89,8 +72,7 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
However these can be replaced by values \eqn{bminvalue} and
\eqn{bmaxvalue} first before computing the link function.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
}
\value{
@@ -99,18 +81,22 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
and if \code{inverse = TRUE} then
\code{exp(theta)/(1+exp(theta))}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
}
\author{ Thomas W. Yee }
@@ -118,12 +104,14 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
Numerical instability may occur when \code{theta} is
close to 1 or 0 (for \code{logit}), or close to \eqn{A} or \eqn{B} for
\code{elogit}.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use, e.g., \code{bvalue}.
+
In terms of the threshold approach with cumulative probabilities for
an ordinal response this link function corresponds to the univariate
logistic distribution (see \code{\link{logistic}}).
+
}
\seealso{
@@ -134,24 +122,25 @@ elogit(theta, earg = list(min = 0, max = 1), inverse = FALSE, deriv = 0,
\code{\link{logistic1}},
\code{\link{loge}}.
+
}
\examples{
-p = seq(0.01, 0.99, by = 0.01)
+p <- seq(0.01, 0.99, by = 0.01)
logit(p)
max(abs(logit(logit(p), inverse = TRUE) - p)) # Should be 0
-p = c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-logit(p) # Has NAs
-logit(p, earg = list(bvalue = .Machine$double.eps)) # Has no NAs
+p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
+logit(p) # Has NAs
+logit(p, bvalue = .Machine$double.eps) # Has no NAs
-p = seq(0.9, 2.2, by = 0.1)
-elogit(p, earg = list(min = 1, max = 2,
- bminvalue = 1 + .Machine$double.eps,
- bmaxvalue = 2 - .Machine$double.eps)) # Has no NAs
+p <- seq(0.9, 2.2, by = 0.1)
+elogit(p, min = 1, max = 2,
+ bminvalue = 1 + .Machine$double.eps,
+ bmaxvalue = 2 - .Machine$double.eps) # Has no NAs
\dontrun{ par(mfrow = c(2,2), lwd = (mylwd <- 2))
-y = seq(-4, 4, length = 100)
-p = seq(0.01, 0.99, by = 0.01)
+y <- seq(-4, 4, length = 100)
+p <- seq(0.01, 0.99, by = 0.01)
for(d in 0:1) {
matplot(p, cbind(logit(p, deriv = d), probit(p, deriv = d)),
type = "n", col = "purple", ylab = "transformation", las = 1,
@@ -186,10 +175,10 @@ for(d in 0) {
}
}
-p = seq(0.21, 0.59, by = 0.01)
-plot(p, elogit(p, earg = list(min = 0.2, max = 0.6)),
- type = "l", col = "black", ylab = "transformation", xlim = c(0,1),
- las = 1, main = "elogit(p, earg = list(min = 0.2, max = 0.6)")
+p <- seq(0.21, 0.59, by = 0.01)
+plot(p, elogit(p, min = 0.2, max = 0.6),
+ type = "l", col = "black", ylab = "transformation", xlim = c(0, 1),
+ las = 1, main = "elogit(p, min = 0.2, max = 0.6)")
par(lwd = 1)
}
}
diff --git a/man/loglapUC.Rd b/man/loglapUC.Rd
index 11609d2..2477747 100644
--- a/man/loglapUC.Rd
+++ b/man/loglapUC.Rd
@@ -12,16 +12,17 @@
(on the log scale),
and asymmetry parameter \code{kappa}.
+
}
\usage{
-dloglap(x, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)), log=FALSE)
-ploglap(q, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)))
-qloglap(p, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)))
-rloglap(n, location.ald=0, scale.ald=1,
- tau=0.5, kappa=sqrt(tau/(1-tau)))
+dloglap(x, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau)), log = FALSE)
+ploglap(q, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau)))
+qloglap(p, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau)))
+rloglap(n, location.ald = 0, scale.ald = 1,
+ tau = 0.5, kappa = sqrt(tau/(1-tau)))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -58,19 +59,25 @@ rloglap(n, location.ald=0, scale.ald=1,
(ALD). There are many variants of ALDs and the one used here
is described in \code{\link{alaplace3}}.
+
}
\value{
\code{dloglap} gives the density,
\code{ploglap} gives the distribution function,
\code{qloglap} gives the quantile function, and
\code{rloglap} generates random deviates.
+
+
}
\references{
+
Kozubowski, T. J. and Podgorski, K. (2003)
Log-Laplace distributions.
\emph{International Mathematical Journal},
\bold{3}, 467--495.
+
+
}
\author{ T. W. Yee }
%\note{
@@ -84,23 +91,24 @@ Log-Laplace distributions.
% \code{\link{loglaplace3}}.
\code{\link{loglaplace1}}.
+
}
\examples{
-loc = 0; sigma = exp(0.5); kappa = 1
-x = seq(-0.2, 5, by=0.01)
+loc <- 0; sigma <- exp(0.5); kappa <- 1
+x <- seq(-0.2, 5, by = 0.01)
\dontrun{
-plot(x, dloglap(x, loc, sigma, kappa=kappa), type="l", col="blue",
- main="Blue is density, red is cumulative distribution function",
- ylim=c(0,1), sub="Purple are 5,10,...,95 percentiles", las=1, ylab="")
-abline(h=0, col="blue", lty=2)
-lines(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- dloglap(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- loc, sigma, kappa=kappa), col="purple", lty=3, type="h")
-lines(x, ploglap(x, loc, sigma, kappa=kappa), type="l", col="red")
-abline(h=0, lty=2)
+plot(x, dloglap(x, loc, sigma, kappa = kappa), type = "l", col = "blue",
+ main = "Blue is density, red is cumulative distribution function",
+ ylim = c(0,1), sub = "Purple are 5,10,...,95 percentiles", las = 1, ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+lines(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
+ dloglap(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
+ loc, sigma, kappa = kappa), col = "purple", lty = 3, type = "h")
+lines(x, ploglap(x, loc, sigma, kappa = kappa), type = "l", col = "red")
+abline(h = 0, lty = 2)
}
-ploglap(qloglap(seq(0.05,0.95,by=0.05), loc, sigma, kappa=kappa),
- loc, sigma, kappa=kappa)
+ploglap(qloglap(seq(0.05,0.95,by = 0.05), loc, sigma, kappa = kappa),
+ loc, sigma, kappa = kappa)
}
\keyword{distribution}
diff --git a/man/loglaplace.Rd b/man/loglaplace.Rd
index 19b5f9c..6d99040 100644
--- a/man/loglaplace.Rd
+++ b/man/loglaplace.Rd
@@ -14,12 +14,12 @@
}
\usage{
-loglaplace1(tau = NULL, llocation = "loge", elocation = list(),
+loglaplace1(tau = NULL, llocation = "loge",
ilocation = NULL, kappa = sqrt(tau/(1 - tau)), Scale.arg = 1,
shrinkage.init = 0.95, parallelLocation = FALSE, digt = 4,
dfmu.init = 3, rep0 = 0.5, minquantile = 0, maxquantile = Inf,
imethod = 1, zero = NULL)
-logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
+logitlaplace1(tau = NULL, llocation = "logit",
ilocation = NULL, kappa = sqrt(tau/(1 - tau)),
Scale.arg = 1, shrinkage.init = 0.95, parallelLocation = FALSE,
digt = 4, dfmu.init = 3, rep01 = 0.5, imethod = 1, zero = NULL)
@@ -45,12 +45,6 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
}
- \item{elocation}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
-
- }
\item{ilocation}{
Optional initial values.
If given, it must be numeric and values are recycled to the
@@ -67,9 +61,9 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
}
-% \item{sameScale}{ Logical.
+% \item{eq.scale}{ Logical.
% Should the scale parameters be equal? It is advised to keep
-% \code{sameScale = TRUE} unchanged because it does not make sense to
+% \code{eq.scale = TRUE} unchanged because it does not make sense to
% have different values for each \code{tau} value.
% }
@@ -106,7 +100,7 @@ logitlaplace1(tau = NULL, llocation = "logit", elocation = list(),
These argument are effectively ignored by default since
\code{\link{loge}} keeps all quantiles positive.
However, if
- \code{llocation = "logoff", elocation = list(offset = 1)}
+ \code{llocation = logoff(offset = 1)}
then it is possible that the fitted quantiles have value 0
because \code{minquantile = 0}.
@@ -199,25 +193,26 @@ Log-Laplace distributions.
\code{\link{alaplace1}},
\code{\link{dloglap}}.
+
}
\examples{
# Example 1: quantile regression of counts with regression splines
-set.seed(123); my.k = exp(0)
-alldat = data.frame(x2 = sort(runif(n <- 500)))
-mymu = function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
-alldat = transform(alldat, y = rnbinom(n, mu = mymu(x2), size = my.k))
-mytau = c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
-fitp = vglm(y ~ bs(x2, df = mydof), data=alldat, trace = TRUE,
+set.seed(123); my.k <- exp(0)
+alldat <- data.frame(x2 = sort(runif(n <- 500)))
+mymu <- function(x) exp( 1 + 3*sin(2*x) / (x+0.5)^2)
+alldat <- transform(alldat, y = rnbinom(n, mu = mymu(x2), size = my.k))
+mytau <- c(0.1, 0.25, 0.5, 0.75, 0.9); mydof = 3
+fitp <- vglm(y ~ bs(x2, df = mydof), data=alldat, trace = TRUE,
loglaplace1(tau = mytau, parallelLoc = TRUE)) # halfstepping is usual
\dontrun{
par(las = 1) # Plot on a log1p() scale
-mylwd = 1.5
+mylwd <- 1.5
with(alldat, plot(x2, jitter(log1p(y), factor = 1.5), col = "red", pch = "o",
main = "Example 1; darkgreen=truth, blue=estimated", cex = 0.75))
with(alldat, matlines(x2, log1p(fitted(fitp)), col = "blue", lty = 1, lwd = mylwd))
-finexgrid = seq(0, 1, len=201)
+finexgrid <- seq(0, 1, len=201)
for(ii in 1:length(mytau))
lines(finexgrid, col = "darkgreen", lwd = mylwd,
log1p(qnbinom(p = mytau[ii], mu = mymu(finexgrid), si = my.k)))
@@ -226,14 +221,14 @@ fitp at extra # Contains useful information
# Example 2: sample proportions
-set.seed(123); nnn = 1000; ssize = 100 # ssize = 1 will not work!
-alldat = data.frame(x2 = sort(runif(nnn)))
-mymu = function(x) logit( 1.0 + 4*x, inv = TRUE)
-alldat = transform(alldat, ssize = ssize,
+set.seed(123); nnn <- 1000; ssize <- 100 # ssize = 1 will not work!
+alldat <- data.frame(x2 = sort(runif(nnn)))
+mymu <- function(x) logit( 1.0 + 4*x, inv = TRUE)
+alldat <- transform(alldat, ssize = ssize,
y2 = rbinom(nnn, size=ssize, prob = mymu(x2)) / ssize)
-mytau = c(0.25, 0.50, 0.75)
-fit1 = vglm(y2 ~ bs(x2, df = 3), data=alldat, weights=ssize, trace = TRUE,
+mytau <- c(0.25, 0.50, 0.75)
+fit1 <- vglm(y2 ~ bs(x2, df = 3), data=alldat, weights=ssize, trace = TRUE,
logitlaplace1(tau = mytau, lloc = "cloglog", paral = TRUE))
\dontrun{
@@ -247,11 +242,11 @@ with(alldat, lines(x2, trueFunction - mean(trueFunction), col = "darkgreen"))
# Plot the data + fitted quantiles (on the original scale)
-myylim = with(alldat, range(y2))
+myylim <- with(alldat, range(y2))
with(alldat, plot(x2, y2, col = "blue", ylim = myylim, las = 1, pch = ".", cex=2.5))
with(alldat, matplot(x2, fitted(fit1), add = TRUE, lwd = 3, type = "l"))
-truecol = rep(1:3, len=fit1 at misc$M) # Add the 'truth'
-smallxgrid = seq(0, 1, len=501)
+truecol <- rep(1:3, len=fit1 at misc$M) # Add the 'truth'
+smallxgrid <- seq(0, 1, len=501)
for(ii in 1:length(mytau))
lines(smallxgrid, col=truecol[ii], lwd=2,
qbinom(p = mytau[ii], prob = mymu(smallxgrid), size=ssize) / ssize)
@@ -261,7 +256,7 @@ for(ii in 1:length(mytau))
with(alldat, matplot(x2, predict(fit1), add = FALSE, lwd = 3, type = "l"))
# Add the 'truth'
for(ii in 1:length(mytau)) {
- true.quant = qbinom(p = mytau[ii], pr = mymu(smallxgrid), si=ssize)/ssize
+ true.quant <- qbinom(p = mytau[ii], pr = mymu(smallxgrid), si=ssize)/ssize
lines(smallxgrid, theta2eta(theta=true.quant, link=linkFunctionChar),
col=truecol[ii], lwd=2)
}
diff --git a/man/loglinb2.Rd b/man/loglinb2.Rd
index fc333a5..89aec32 100644
--- a/man/loglinb2.Rd
+++ b/man/loglinb2.Rd
@@ -84,23 +84,23 @@ McCullagh, P. and Nelder, J. A. (1989)
}
\examples{
-coalminers = transform(coalminers, Age = (age - 42) / 5)
+coalminers <- transform(coalminers, Age = (age - 42) / 5)
# Get the n x 4 matrix of counts
-temp = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, coalminers)
-counts = round(c(weights(temp, type = "prior")) * temp at y)
+temp <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age, binom2.or, coalminers)
+counts <- round(c(weights(temp, type = "prior")) * temp at y)
# Create a n x 2 matrix response for loglinb2()
-# bwmat = matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow=TRUE)
-bwmat = cbind(bln=c(0,0,1,1), wheeze=c(0,1,0,1))
-matof1 = matrix(1, nrow(counts), 1)
-newminers = data.frame(bln = kronecker(matof1, bwmat[,1]),
+# bwmat <- matrix(c(0,0, 0,1, 1,0, 1,1), 4, 2, byrow = TRUE)
+bwmat <- cbind(bln = c(0,0,1,1), wheeze = c(0,1,0,1))
+matof1 <- matrix(1, nrow(counts), 1)
+newminers <- data.frame(bln = kronecker(matof1, bwmat[,1]),
wheeze = kronecker(matof1, bwmat[,2]),
wt = c(t(counts)),
Age = with(coalminers, rep(age, rep(4, length(age)))))
-newminers = newminers[with(newminers, wt) > 0,]
+newminers <- newminers[with(newminers, wt) > 0,]
-fit = vglm(cbind(bln,wheeze) ~ Age, loglinb2, weight = wt, data = newminers)
+fit <- vglm(cbind(bln,wheeze) ~ Age, loglinb2, weight = wt, data = newminers)
coef(fit, matrix = TRUE) # Same! (at least for the log odds-ratio)
summary(fit)
diff --git a/man/loglinb3.Rd b/man/loglinb3.Rd
index 8974686..d5af096 100644
--- a/man/loglinb3.Rd
+++ b/man/loglinb3.Rd
@@ -93,7 +93,7 @@ contains further information and examples.
}
\examples{
-fit = vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3, hunua)
+fit <- vglm(cbind(cyadea, beitaw, kniexc) ~ altitude, loglinb3, hunua)
coef(fit, matrix = TRUE)
head(fitted(fit))
summary(fit)
diff --git a/man/loglog.Rd b/man/loglog.Rd
index f3e09db..63a5c6b 100644
--- a/man/loglog.Rd
+++ b/man/loglog.Rd
@@ -8,7 +8,7 @@
}
\usage{
-loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
+loglog(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,32 +18,17 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
+ \item{bvalue}{
Values of \code{theta} which are less than or equal to 1 can be
- replaced by the \code{bvalue} component of the list \code{earg}
+ replaced by \code{bvalue}
before computing the link function value.
The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ See \code{\link{Links}} for more information.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
-
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
-
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
}
@@ -53,8 +38,7 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical values of \code{theta} close to 1 or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
}
\value{
@@ -84,7 +68,7 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
\note{
Numerical instability may occur when \code{theta} is
- close to 1 unless \code{earg} is used.
+ close to 1 unless \code{bvalue} is used.
}
@@ -94,13 +78,14 @@ loglog(theta, earg = list(), inverse = FALSE, deriv = 0,
\code{\link{loge}},
\code{\link{logoff}}.
+
}
\examples{
-x = seq(0.8, 1.5, by=0.1)
-loglog(x) # Has NAs
-loglog(x, earg = list(bvalue = 1.0 + .Machine$double.eps)) # Has no NAs
+x <- seq(0.8, 1.5, by = 0.1)
+loglog(x) # Has NAs
+loglog(x, bvalue = 1.0 + .Machine$double.eps) # Has no NAs
-x = seq(1.01, 10, len = 100)
+x <- seq(1.01, 10, len = 100)
loglog(x)
max(abs(loglog(loglog(x), inverse = TRUE) - x)) # Should be 0
}
diff --git a/man/lognormal.Rd b/man/lognormal.Rd
index 2d8610f..fa25f6e 100644
--- a/man/lognormal.Rd
+++ b/man/lognormal.Rd
@@ -9,10 +9,8 @@
}
\usage{
-lognormal(lmeanlog = "identity", lsdlog = "loge",
- emeanlog = list(), esdlog = list(), zero = 2)
+lognormal(lmeanlog = "identity", lsdlog = "loge", zero = 2)
lognormal3(lmeanlog = "identity", lsdlog = "loge",
- emeanlog = list(), esdlog = list(),
powers.try = (-3):3, delta = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -24,11 +22,16 @@ lognormal3(lmeanlog = "identity", lsdlog = "loge",
See \code{\link{Links}} for more choices.
}
- \item{emeanlog, esdlog}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+
+% \item{emeanlog, esdlog}{
+% emeanlog = list(), esdlog = list(),
+% emeanlog = list(), esdlog = list(),
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+
\item{zero}{
An integer-valued vector specifying which
linear/additive predictors are modelled as intercepts only.
@@ -117,19 +120,19 @@ Hoboken, NJ, USA: Wiley-Interscience.
\examples{
ldat <- data.frame(y = rlnorm(nn <- 1000, meanlog = 1.5, sdlog = exp(-0.8)))
fit <- vglm(y ~ 1, lognormal, ldat, trace = TRUE)
-coef(fit, mat = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
ldat2 <- data.frame(x2 = runif(nn <- 1000))
ldat2 <- transform(ldat2, y = rlnorm(nn, mean = 0.5, sd = exp(x2)))
fit <- vglm(y ~ x2, lognormal(zero = 1), ldat2, trace = TRUE, crit = "c")
-coef(fit, mat = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
lambda <- 4
ldat3 <- data.frame(y = lambda + rlnorm(n = 1000, mean = 1.5, sd = exp(-0.8)))
fit <- vglm(y ~ 1, lognormal3, ldat3, trace = TRUE, crit = "c")
-coef(fit, mat = TRUE)
+coef(fit, matrix = TRUE)
summary(fit)
}
\keyword{models}
diff --git a/man/logoff.Rd b/man/logoff.Rd
index 278e11e..33a220f 100644
--- a/man/logoff.Rd
+++ b/man/logoff.Rd
@@ -7,7 +7,7 @@
including its inverse and the first two derivatives.
}
\usage{
-logoff(theta, earg = list(offset=0), inverse = FALSE, deriv = 0,
+logoff(theta, offset = 0, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -17,35 +17,34 @@ logoff(theta, earg = list(offset=0), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- List. Extra argument for passing in additional information.
- The \code{offset} component of the list \code{earg}
- is the offset value.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{offset}{
+ Offset value.
+ See \code{\link{Links}}.
+
+
+ }
+
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
}
- \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
- \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
- \item{short}{ Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object. }
- \item{tag}{ Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}. }
}
\details{
The log-offset link function is very commonly used for parameters that
are greater than a certain value.
- In particular, it is defined by \code{log(theta+offset)} where
+ In particular, it is defined by \code{log(theta + offset)} where
\code{offset} is the offset value. For example,
- if \code{offset=0.5} then the value of \code{theta} is restricted
+ if \code{offset = 0.5} then the value of \code{theta} is restricted
to be greater than \eqn{-0.5}.
+
Numerical values of \code{theta} close to \code{-offset} or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
+
}
\value{
@@ -54,38 +53,46 @@ logoff(theta, earg = list(offset=0), inverse = FALSE, deriv = 0,
and if \code{inverse = TRUE} then
\code{exp(theta)-offset}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
}
\author{ Thomas W. Yee }
\note{
The default means this function is identical to \code{\link{loge}}.
+
Numerical instability may occur when \code{theta} is
close to \code{-offset}.
+
}
\seealso{
\code{\link{Links}},
\code{\link{loge}}.
+
+
}
\examples{
\dontrun{
-logoff(seq(-0.2, 0.5, by=0.1))
-logoff(seq(-0.2, 0.5, by=0.1), earg=list(offset=0.5))
-log(seq(-0.2, 0.5, by=0.1) + 0.5) }
+logoff(seq(-0.2, 0.5, by = 0.1))
+logoff(seq(-0.2, 0.5, by = 0.1), offset = 0.5)
+ log(seq(-0.2, 0.5, by = 0.1) + 0.5) }
}
\keyword{math}
\keyword{models}
diff --git a/man/lomax.Rd b/man/lomax.Rd
index 17217f5..f711945 100644
--- a/man/lomax.Rd
+++ b/man/lomax.Rd
@@ -7,8 +7,8 @@
Lomax distribution.
}
\usage{
-lomax(lscale = "loge", lshape3.q = "loge", escale = list(),
- eshape3.q = list(), iscale = NULL, ishape3.q = 2, zero = NULL)
+lomax(lscale = "loge", lshape3.q = "loge",
+ iscale = NULL, ishape3.q = 2, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -18,11 +18,6 @@ lomax(lscale = "loge", lshape3.q = "loge", escale = list(),
See \code{\link{Links}} for more choices.
}
- \item{escale, eshape3.q}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, ishape3.q}{
Optional initial values for \code{scale} and \code{q}.
@@ -99,9 +94,9 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-ldata = data.frame(y = rlomax(n = 1000, exp(1), exp(2)))
-fit = vglm(y ~ 1, lomax, ldata, trace = TRUE)
-fit = vglm(y ~ 1, lomax(iscale = exp(1), ishape3.q = exp(2)), ldata, trace = TRUE)
+ldata <- data.frame(y = rlomax(n = 1000, exp(1), exp(2)))
+fit <- vglm(y ~ 1, lomax, ldata, trace = TRUE)
+fit <- vglm(y ~ 1, lomax(iscale = exp(1), ishape3.q = exp(2)), ldata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/lomaxUC.Rd b/man/lomaxUC.Rd
index 3715d01..93df89f 100644
--- a/man/lomaxUC.Rd
+++ b/man/lomaxUC.Rd
@@ -61,8 +61,8 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-ldata = data.frame(y = rlomax(n = 2000, 6, 2))
-fit = vglm(y ~ 1, lomax(ishape3.q = 2.1), ldata, trace = TRUE, crit = "coef")
+ldata <- data.frame(y = rlomax(n = 2000, 6, 2))
+fit <- vglm(y ~ 1, lomax(ishape3.q = 2.1), ldata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/lqnorm.Rd b/man/lqnorm.Rd
index d242b9b..09b2b67 100644
--- a/man/lqnorm.Rd
+++ b/man/lqnorm.Rd
@@ -8,8 +8,8 @@
}
\usage{
-lqnorm(qpower=2, link="identity", earg=list(),
- imethod=1, imu=NULL, shrinkage.init=0.95)
+lqnorm(qpower = 2, link = "identity",
+ imethod = 1, imu = NULL, shrinkage.init = 0.95)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,9 +20,8 @@ lqnorm(qpower=2, link="identity", earg=list(),
This quantity is minimized with respect to the regression coefficients.
}
- \item{link, earg}{
- Link function applied to the `mean' \eqn{\mu}{mu},
- and extra argument optionally used by the link function.
+ \item{link}{
+ Link function applied to the `mean' \eqn{\mu}{mu}.
See \code{\link{Links}} for more details.
}
@@ -34,7 +33,7 @@ lqnorm(qpower=2, link="identity", earg=list(),
}
\item{imu}{
Numeric, optional initial values used for the fitted values.
- The default is to use \code{imethod=1}.
+ The default is to use \code{imethod = 1}.
}
\item{shrinkage.init}{
@@ -42,7 +41,7 @@ lqnorm(qpower=2, link="identity", earg=list(),
The value must be between 0 and 1 inclusive, and
a value of 0 means the individual response values are used,
and a value of 1 means the median or mean is used.
- This argument is used in conjunction with \code{imethod=3}.
+ This argument is used in conjunction with \code{imethod = 3}.
}
}
@@ -59,6 +58,7 @@ lqnorm(qpower=2, link="identity", earg=list(),
it should be just a vector here since
this function handles only a single vector or one-column response.
+
Numerical problem will occur when \eqn{q} is too close to one.
Probably reasonable values range from 1.5 and up, say.
The value \eqn{q=2} corresponds to ordinary least squares while
@@ -66,12 +66,14 @@ lqnorm(qpower=2, link="identity", earg=list(),
distribution. The procedure becomes more sensitive to outliers the
larger the value of \eqn{q}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Yee, T. W. and Wild, C. J. (1996)
@@ -79,6 +81,7 @@ lqnorm(qpower=2, link="identity", earg=list(),
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
}
\author{ Thomas W. Yee }
@@ -90,40 +93,43 @@ lqnorm(qpower=2, link="identity", earg=list(),
called \code{objectiveFunction} which is the value of the
objective function at the final iteration.
+
}
\section{Warning }{
Convergence failure is common, therefore the user is advised to be
cautious and monitor convergence!
+
}
\seealso{
\code{\link{gaussianff}}.
+
}
\examples{
set.seed(123)
-ldat = data.frame(x = sort(runif(nn <- 10 )))
-realfun = function(x) 4 + 5*x
-ldat = transform(ldat, y = realfun(x) + rnorm(nn, sd=exp(1)))
+ldata <- data.frame(x = sort(runif(nn <- 10 )))
+realfun <- function(x) 4 + 5*x
+ldata <- transform(ldata, y = realfun(x) + rnorm(nn, sd = exp(-1)))
# Make the first observation an outlier
-ldat = transform(ldat, y = c(4*y[1], y[-1]), x=c(-1, x[-1]))
-fit = vglm(y ~ x, fam = lqnorm(qpower=1.2), data=ldat)
-coef(fit, matrix=TRUE)
+ldata <- transform(ldata, y = c(4*y[1], y[-1]), x = c(-1, x[-1]))
+fit <- vglm(y ~ x, fam = lqnorm(qpower = 1.2), data = ldata)
+coef(fit, matrix = TRUE)
head(fitted(fit))
fit@misc$qpower
fit@misc$objectiveFunction
\dontrun{
# Graphical check
-with(ldat, plot(x, y, main=paste("LS=red, lqnorm=blue (qpower = ",
- fit at misc$qpower, "), truth=black", sep=""), col="blue"))
-lmfit = lm(y ~ x, data=ldat)
-with(ldat, lines(x, fitted(fit), col="blue"))
-with(ldat, lines(x, lmfit$fitted, col="red"))
-with(ldat, lines(x, realfun(x), col="black")) }
+with(ldata, plot(x, y, main = paste("LS = red, lqnorm = blue (qpower = ",
+ fit@misc$qpower, "), truth = black", sep = ""), col = "blue"))
+lmfit <- lm(y ~ x, data = ldata)
+with(ldata, lines(x, fitted(fit), col = "blue"))
+with(ldata, lines(x, lmfit$fitted, col = "red"))
+with(ldata, lines(x, realfun(x), col = "black")) }
}
\keyword{models}
\keyword{regression}
diff --git a/man/lrtest.Rd b/man/lrtest.Rd
index 34eaebc..ee455fa 100644
--- a/man/lrtest.Rd
+++ b/man/lrtest.Rd
@@ -139,10 +139,10 @@
\examples{
set.seed(1)
-pneumo = transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
-fit1 = vglm(cbind(normal, mild, severe) ~ let , propodds, pneumo)
-fit2 = vglm(cbind(normal, mild, severe) ~ let + x3, propodds, pneumo)
-fit3 = vglm(cbind(normal, mild, severe) ~ let , cumulative, pneumo)
+pneumo <- transform(pneumo, let = log(exposure.time), x3 = runif(nrow(pneumo)))
+fit1 <- vglm(cbind(normal, mild, severe) ~ let , propodds, pneumo)
+fit2 <- vglm(cbind(normal, mild, severe) ~ let + x3, propodds, pneumo)
+fit3 <- vglm(cbind(normal, mild, severe) ~ let , cumulative, pneumo)
# Various equivalent specifications of the LR test for testing x3
(ans1 <- lrtest(fit2, fit1))
ans2 <- lrtest(fit2, 2)
diff --git a/man/lv.Rd b/man/lv.Rd
index 3aa6db5..58ce9f2 100644
--- a/man/lv.Rd
+++ b/man/lv.Rd
@@ -26,10 +26,14 @@ lv(object, ...)
by ecologists.
Latent variables are linear combinations of the explanatory
variables.
+
+
}
\value{
The value returned depends specifically on the methods
function invoked.
+
+
}
\references{
Yee, T. W. and Hastie, T. J. (2003)
@@ -37,22 +41,27 @@ Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
Yee, T. W. (2004)
A new technique for maximum-likelihood
canonical Gaussian ordination.
\emph{Ecological Monographs},
\bold{74}, 685--701.
+
Yee, T. W. (2006)
Constrained additive ordination.
\emph{Ecology}, \bold{87}, 203--213.
+
}
\author{ Thomas W. Yee }
\note{
Latent variables are not really applicable to
\code{\link{vglm}}/\code{\link{vgam}} models.
+
+
}
@@ -61,19 +70,21 @@ Constrained additive ordination.
\code{lv.rrvglm},
\code{lv.cao},
\code{\link{lvplot}}.
+
+
}
\examples{
\dontrun{
hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
-p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, Rank = 1, df1.nl =
- c(Zoraspin=2.5, 3), Bestof = 3, Crow1positive = TRUE)
+p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Rank = 1, df1.nl =
+ c(Zoraspin = 2.5, 3), Bestof = 3, Crow1positive = TRUE)
-var(lv(p1)) # Scaled to unit variance # Scaled to unit variance
-c(lv(p1)) # Estimated site scores
+var(lv(p1))  # Scaled to unit variance
+c(lv(p1)) # Estimated site scores
}
}
\keyword{models}
diff --git a/man/lvplot.Rd b/man/lvplot.Rd
index 241a66c..5b93bc9 100644
--- a/man/lvplot.Rd
+++ b/man/lvplot.Rd
@@ -27,10 +27,14 @@ lvplot(object, ...)
latent variables are often called the \emph{site scores}.
Latent variable plots were coined by Yee (2004), and have
the latent variable as at least one of its axes.
+
+
}
\value{
The value returned depends specifically on the methods
function invoked.
+
+
}
\references{
Yee, T. W. (2004)
@@ -57,19 +61,21 @@ Constrained additive ordination.
\code{lvplot.cao},
\code{\link{lv}},
\code{\link{trplot}}.
+
+
}
\examples{
\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
set.seed(123)
-p1 = cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
- WaterCon + BareSand + FallTwig +
- CoveMoss + CoveHerb + ReflLux,
- family = poissonff, data = hspider, Bestof = 3,
- df1.nl = c(Zoraspin=2.5, 3), Crow1positive = TRUE)
-index = 1:ncol(p1 at y)
-lvplot(p1, lcol=index, pcol=index, y=TRUE, las=1)
+p1 <- cao(cbind(Pardlugu, Pardmont, Pardnigr, Pardpull, Zoraspin) ~
+ WaterCon + BareSand + FallTwig +
+ CoveMoss + CoveHerb + ReflLux,
+ family = poissonff, data = hspider, Bestof = 3,
+ df1.nl = c(Zoraspin = 2.5, 3), Crow1positive = TRUE)
+index <- 1:ncol(depvar(p1))
+lvplot(p1, lcol = index, pcol = index, y = TRUE, las = 1)
}
}
diff --git a/man/lvplot.qrrvglm.Rd b/man/lvplot.qrrvglm.Rd
index 97c091b..981aee2 100644
--- a/man/lvplot.qrrvglm.Rd
+++ b/man/lvplot.qrrvglm.Rd
@@ -46,7 +46,7 @@ lvplot.qrrvglm(object, varlvI = FALSE, reference = NULL,
These values are jittered to expose ties.
}
\item{y}{ Logical. If \code{TRUE}, the responses will be plotted
- (applies only to rank-1 models and if \code{type="fitted.values"}.)
+ (applies only to rank-1 models and if \code{type = "fitted.values"}.)
}
\item{type}{ Either \code{"fitted.values"} or \code{"predictors"},
specifies whether the y-axis is on the response or eta-scales
@@ -86,12 +86,12 @@ For rank-2 models, points are the optima.
assigned a value that is used for the elliptical contouring.
If \code{Absolute} is \code{FALSE} then \code{ellipse}
should be assigned a value between 0 and 1, for example,
- setting \code{ellipse=0.9} means an ellipse with contour
+ setting \code{ellipse = 0.9} means an ellipse with contour
= 90\% of the maximum will be plotted about each optimum.
If \code{ellipse} is a negative value, then the function checks
that the model is an equal-tolerances model and
- \code{varlvI=FALSE}, and if so, plots circles with
- radius \code{-ellipse}. For example, setting \code{ellipse=-1}
+ \code{varlvI = FALSE}, and if so, plots circles with
+ radius \code{-ellipse}. For example, setting \code{ellipse = -1}
will result in circular contours that have unit radius (in latent
variable units). If \code{ellipse} is \code{NULL} or \code{FALSE}
then no ellipse is drawn around the optima.
@@ -286,22 +286,22 @@ canonical Gaussian ordination.
\examples{
set.seed(123)
-nn = 200
-cdat = data.frame(x2 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
- x3 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
- x4 = rnorm(nn)) # Has mean 0 (needed when ITol=TRUE)
-cdat = transform(cdat, lv1 = x2 + x3 - 2*x4,
- lv2 = -x2 + x3 + 0*x4)
+nn <- 200
+cdata <- data.frame(x2 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
+ x3 = rnorm(nn), # Has mean 0 (needed when ITol=TRUE)
+ x4 = rnorm(nn)) # Has mean 0 (needed when ITol=TRUE)
+cdata <- transform(cdata, lv1 = x2 + x3 - 2*x4,
+ lv2 = -x2 + x3 + 0*x4)
# Nb. lv2 is weakly correlated with lv1
-cdat = transform(cdat, lambda1 = exp(6 - 0.5 * (lv1-0)^2 - 0.5 * (lv2-0)^2),
- lambda2 = exp(5 - 0.5 * (lv1-1)^2 - 0.5 * (lv2-1)^2),
- lambda3 = exp(5 - 0.5 * (lv1+2)^2 - 0.5 * (lv2-0)^2))
-cdat = transform(cdat, spp1 = rpois(nn, lambda1),
- spp2 = rpois(nn, lambda2),
- spp3 = rpois(nn, lambda3))
+cdata <- transform(cdata, lambda1 = exp(6 - 0.5 * (lv1-0)^2 - 0.5 * (lv2-0)^2),
+ lambda2 = exp(5 - 0.5 * (lv1-1)^2 - 0.5 * (lv2-1)^2),
+ lambda3 = exp(5 - 0.5 * (lv1+2)^2 - 0.5 * (lv2-0)^2))
+cdata <- transform(cdata, spp1 = rpois(nn, lambda1),
+ spp2 = rpois(nn, lambda2),
+ spp3 = rpois(nn, lambda3))
set.seed(111)
-# vvv p2 = cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff,
-# vvv data = cdat,
+# vvv p2 <- cqo(cbind(spp1,spp2,spp3) ~ x2 + x3 + x4, poissonff,
+# vvv data = cdata,
# vvv Rank=2, ITolerances=TRUE,
# vvv Crow1positive=c(TRUE,FALSE)) # deviance = 505.81
# vvv if (deviance(p2) > 506) stop("suboptimal fit obtained")
@@ -309,16 +309,16 @@ set.seed(111)
# vvv Coef(p2)
\dontrun{
-lvplot(p2, sites=TRUE, spch="*", scol="darkgreen", scex=1.5,
- chull=TRUE, label=TRUE, Absolute=TRUE, ellipse=140,
- adj=-0.5, pcol="blue", pcex=1.3, las=1,
- C=TRUE, Cadj=c(-.3,-.3,1), Clwd=2, Ccex=1.4, Ccol="red",
- main=paste("Contours at Abundance=140 with",
+lvplot(p2, sites = TRUE, spch = "*", scol = "darkgreen", scex = 1.5,
+ chull = TRUE, label = TRUE, Absolute = TRUE, ellipse = 140,
+ adj = -0.5, pcol = "blue", pcex = 1.3, las = 1,
+ C = TRUE, Cadj = c(-.3,-.3,1), Clwd = 2, Ccex = 1.4, Ccol = "red",
+ main = paste("Contours at Abundance = 140 with",
"convex hull of the site scores")) }
# vvv var(lv(p2)) # A diagonal matrix, i.e., uncorrelated latent variables
-# vvv var(lv(p2, varlvI=TRUE)) # Identity matrix
+# vvv var(lv(p2, varlvI = TRUE)) # Identity matrix
# vvv Tol(p2)[,,1:2] # Identity matrix
-# vvv Tol(p2, varlvI=TRUE)[,,1:2] # A diagonal matrix
+# vvv Tol(p2, varlvI = TRUE)[,,1:2] # A diagonal matrix
}
\keyword{models}
\keyword{regression}
diff --git a/man/lvplot.rrvglm.Rd b/man/lvplot.rrvglm.Rd
index ca7ea36..3fb8cb7 100644
--- a/man/lvplot.rrvglm.Rd
+++ b/man/lvplot.rrvglm.Rd
@@ -145,15 +145,15 @@ Reduced-rank vector generalized linear models.
\code{\link{rrvglm.control}}.
}
\examples{
-nn = nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
-pneumo = transform(pneumo, slet=scale(log(exposure.time)),
- x1 = rnorm(nn), x2 = rnorm(nn), x3 = rnorm(nn))
-fit = rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
- multinomial, pneumo, Rank=2, Corner=FALSE, Uncorrel=TRUE)
+nn <- nrow(pneumo) # x1, x2 and x3 are some unrelated covariates
+pneumo <- transform(pneumo, slet = scale(log(exposure.time)),
+ x1 = rnorm(nn), x2 = rnorm(nn), x3 = rnorm(nn))
+fit <- rrvglm(cbind(normal, mild, severe) ~ slet + x1 + x2 + x3,
+ multinomial, pneumo, Rank=2, Corner=FALSE, Uncorrel=TRUE)
\dontrun{
-lvplot(fit, chull=TRUE, scores=TRUE, clty=2, ccol="blue", scol="red",
- Ccol="darkgreen", Clwd=2, Ccex=2,
- main="Biplot of some fictitional data") }
+lvplot(fit, chull = TRUE, scores = TRUE, clty = 2, ccol = "blue", scol = "red",
+ Ccol = "darkgreen", Clwd = 2, Ccex = 2,
+       main = "Biplot of some fictional data") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/makeham.Rd b/man/makeham.Rd
new file mode 100644
index 0000000..0c7ab13
--- /dev/null
+++ b/man/makeham.Rd
@@ -0,0 +1,158 @@
+\name{makeham}
+\alias{makeham}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Makeham Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 3-parameter
+ Makeham distribution.
+
+}
+\usage{
+makeham(lshape = "loge", lscale = "loge", lepsilon = "loge",
+ ishape = NULL, iscale = NULL, iepsilon = 0.3,
+ nsimEIM = 500, oim.mean = TRUE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale, lepsilon}{
+ Parameter link functions applied to the
+ shape parameter \code{shape},
+ scale parameter \code{scale}, and
+ other parameter \code{epsilon}.
+ All parameters are treated as positive here
+ (cf. \code{\link{dmakeham}} allows \code{epsilon = 0}, etc.).
+ See \code{\link{Links}} for more choices.
+
+
+ }
+
+% \item{eshape, escale, eepsilon}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+ \item{ishape, iscale, iepsilon}{
+ Optional initial values.
+ A \code{NULL} means a value is computed internally.
+ A value must be given for \code{iepsilon} currently, and this
+ is a sensitive parameter!
+
+
+ }
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}}.
+ Argument \code{probs.y} is used only when \code{imethod = 2}.
+
+ }
+ \item{oim.mean}{
+ To be currently ignored.
+
+ }
+}
+\details{
+The Makeham distribution, which adds another parameter
+to the Gompertz distribution,
+has cumulative distribution function
+\deqn{F(x; \alpha, \beta, \varepsilon) =
+1 - \exp
+\left\{
+-x \varepsilon + \frac {\alpha}{\beta}
+\left[ 1 - e^{\beta x} \right]
+\right\}
+}{%
+F(x; alpha, beta, epsilon) = 1 - exp(-x * epsilon + (alpha / beta) * [1 - e^(beta * x)])
+}
+which leads to a probability density function
+\deqn{f(x; \alpha, \beta, \varepsilon) =
+\left[
+\varepsilon + \alpha e^{\beta x} \right]
+\;
+\exp
+\left\{
+-x \varepsilon + \frac {\alpha}{\beta}
+\left[ 1 - e^{\beta x} \right]
+\right\},
+}{%
+f(x; alpha, beta, epsilon) = (epsilon + alpha * e^(beta x) ) * exp(-x * epsilon + (alpha / beta) * [1 - e^(beta * x)])
+}
+for \eqn{\alpha > 0}{alpha > 0},
+\eqn{\beta > 0}{beta > 0},
+\eqn{\varepsilon \geq 0}{epsilon >= 0},
+\eqn{x > 0}.
+Here, \eqn{\beta}{beta} is called the scale parameter \code{scale},
+and \eqn{\alpha}{alpha} is called a shape parameter.
+The moments for this distribution do
+not appear to be available in closed form.
+
+
+Simulated Fisher scoring is used and multiple responses are handled.
+
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+%\references{
+%
+%}
+
+\author{ T. W. Yee }
+\section{Warning }{
+A lot of care is needed because
+this is a rather difficult distribution for parameter estimation,
+especially when the shape parameter is large relative to the
+scale parameter.
+If the self-starting initial values fail then try experimenting
+with the initial value arguments, especially \code{iepsilon}.
+Successful convergence depends on having very good initial values.
+More improvements could be made here.
+Also, monitor convergence by setting \code{trace = TRUE}.
+
+
+A trick is to fit a \code{\link{gompertz}} distribution and use
+it for initial values; see below.
+However, this family function is currently numerically fraught.
+
+
+}
+
+\seealso{
+ \code{\link{dmakeham}},
+ \code{\link{gompertz}}.
+
+
+}
+
+\examples{
+\dontrun{ set.seed(123)
+mdata <- data.frame(x2 = runif(nn <- 1000))
+mdata <- transform(mdata, eta1 = -1,
+ ceta1 = 1,
+ eeta1 = -2)
+mdata <- transform(mdata, shape1 = exp(eta1),
+ scale1 = exp(ceta1),
+ epsil1 = exp(eeta1))
+mdata <- transform(mdata,
+ y1 = rmakeham(nn, shape = shape1, scale = scale1, eps = epsil1))
+
+# A trick is to fit a Gompertz distribution first
+fit0 <- vglm(y1 ~ 1, gompertz, data = mdata, trace = TRUE)
+fit1 <- vglm(y1 ~ 1, makeham, data = mdata,
+ etastart = cbind(predict(fit0), log(0.1)), trace = TRUE)
+
+coef(fit1, matrix = TRUE)
+summary(fit1)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
+%# fit1 <- vglm(y1 ~ 1, makeham, data = mdata, trace = TRUE)
+%# fit2 <- vglm(y1 ~ 1, makeham(imeth = 2), data = mdata, trace = TRUE)
+
diff --git a/man/makehamUC.Rd b/man/makehamUC.Rd
new file mode 100644
index 0000000..31c3b51
--- /dev/null
+++ b/man/makehamUC.Rd
@@ -0,0 +1,97 @@
+\name{Makeham}
+\alias{Makeham}
+\alias{dmakeham}
+\alias{pmakeham}
+\alias{qmakeham}
+\alias{rmakeham}
+\title{The Makeham Distribution}
+\description{
+ Density,
+ cumulative distribution function,
+ quantile function
+ and
+ random generation for
+ the Makeham distribution.
+
+}
+\usage{
+dmakeham(x, shape, scale = 1, epsilon = 0, log = FALSE)
+pmakeham(q, shape, scale = 1, epsilon = 0)
+qmakeham(p, shape, scale = 1, epsilon = 0)
+rmakeham(n, shape, scale = 1, epsilon = 0)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. }
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{shape, scale}{positive shape and scale parameters. }
+ \item{epsilon}{another parameter. Must be non-negative. See below. }
+
+}
+\value{
+ \code{dmakeham} gives the density,
+ \code{pmakeham} gives the cumulative distribution function,
+ \code{qmakeham} gives the quantile function, and
+ \code{rmakeham} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{makeham}} for details.
+ The default value of \code{epsilon = 0} corresponds
+ to the Gompertz distribution.
+ The function \code{\link{pmakeham}} uses \code{\link{lambertW}}.
+
+
+}
+\references{
+
+Jodra, P. (2009)
+A closed-form expression for the quantile function of the
+Gompertz-Makeham distribution.
+\emph{Mathematics and Computers in Simulation},
+\bold{79}, 3069--3075.
+
+
+
+}
+
+
+%\note{
+%
+%}
+\seealso{
+ \code{\link{makeham}},
+ \code{\link{lambertW}}.
+
+
+}
+\examples{
+probs <- seq(0.01, 0.99, by = 0.01)
+Shape <- exp(-1); Scale <- exp(1); eps = Epsilon <- exp(-1)
+max(abs(pmakeham(qmakeham(p = probs, Shape, sca = Scale, eps = Epsilon),
+ Shape, sca = Scale, eps = Epsilon) - probs)) # Should be 0
+
+\dontrun{ x <- seq(-0.1, 2.0, by = 0.01);
+plot(x, dmakeham(x, Shape, sca = Scale, eps = Epsilon), type = "l",
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles",
+ col = "blue", las = 1, ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pmakeham(x, Shape, sca = Scale, eps = Epsilon), col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qmakeham(probs, Shape, sca = Scale, eps = Epsilon)
+lines(Q, dmakeham(Q, Shape, sca = Scale, eps = Epsilon),
+ col = "purple", lty = 3, type = "h")
+pmakeham(Q, Shape, sca = Scale, eps = Epsilon) - probs # Should be all zero
+abline(h = probs, col = "purple", lty = 3) }
+}
+\keyword{distribution}
+
+
diff --git a/man/margeff.Rd b/man/margeff.Rd
index adb2649..af04ad6 100644
--- a/man/margeff.Rd
+++ b/man/margeff.Rd
@@ -99,26 +99,26 @@ margeff(object, subset = NULL)
\examples{
# Not a good example for multinomial() because the response is ordinal!!
-ii = 3; hh = 1/100
-pneumo = transform(pneumo, let = log(exposure.time))
-fit = vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
-fit = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(reverse = TRUE, parallel = TRUE),
- data = pneumo)
-fitted(fit)[ii,]
-
-mynewdata = with(pneumo, data.frame(let = let[ii]+hh))
+ii <- 3; hh <- 1/100
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
+fit <- vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(reverse = TRUE, parallel = TRUE),
+ data = pneumo)
+fitted(fit)[ii, ]
+
+mynewdata <- with(pneumo, data.frame(let = let[ii]+hh))
(newp <- predict(fit, newdata = mynewdata, type = "response"))
# Compare the difference. Should be the same as hh --> 0.
-round(dig = 3, (newp-fitted(fit)[ii,])/hh) # Finite-difference approximation
+round(dig = 3, (newp-fitted(fit)[ii, ])/hh) # Finite-difference approxn
round(dig = 3, margeff(fit, subset = ii)["let",])
# Other examples
round(dig = 3, margeff(fit))
round(dig = 3, margeff(fit, subset = 2)["let",])
-round(dig = 3, margeff(fit, subset = c(FALSE,TRUE))["let",,]) # recycling
-round(dig = 3, margeff(fit, subset = c(2,4,6,8))["let",,])
+round(dig = 3, margeff(fit, subset = c(FALSE, TRUE))["let",,]) # recycling
+round(dig = 3, margeff(fit, subset = c(2, 4, 6, 8))["let",,])
}
diff --git a/man/maxwell.Rd b/man/maxwell.Rd
index 8099f19..9058b84 100644
--- a/man/maxwell.Rd
+++ b/man/maxwell.Rd
@@ -8,13 +8,12 @@
}
\usage{
-maxwell(link = "loge", earg = list())
+maxwell(link = "loge", zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Parameter link function and extra argument
- applied to the parameter \eqn{a}.
+ \item{link, zero}{
+ Parameter link function applied to \eqn{a}.
See \code{\link{Links}} for more choices and information;
a log link is the default because the parameter is positive.
More information is at \code{\link{CommonVGAMffArguments}}.
@@ -54,8 +53,9 @@ maxwell(link = "loge", earg = list())
}
\author{ T. W. Yee }
\note{
- A related distribution is the Rayleigh distribution.
Fisher-scoring and Newton-Raphson are the same here.
+ A related distribution is the Rayleigh distribution.
+ This \pkg{VGAM} family function handles multiple responses.
}
@@ -64,10 +64,11 @@ maxwell(link = "loge", earg = list())
\code{\link{Maxwell}},
\code{\link{rayleigh}}.
+
}
\examples{
-mdata = data.frame(y = rmaxwell(1000, a = exp(2)))
-fit = vglm(y ~ 1, maxwell, mdata, trace = TRUE, crit = "coef")
+mdata <- data.frame(y = rmaxwell(1000, a = exp(2)))
+fit <- vglm(y ~ 1, maxwell, mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/maxwellUC.Rd b/man/maxwellUC.Rd
index 49c55d4..18c8f7b 100644
--- a/man/maxwellUC.Rd
+++ b/man/maxwellUC.Rd
@@ -79,7 +79,7 @@ Q <- qmaxwell(probs, a = a)
lines(Q, dmaxwell(Q, a), col = "purple", lty = 3, type = "h")
lines(Q, pmaxwell(Q, a), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
-max(abs(pmaxwell(Q, a) - probs)) # Should be zero
+max(abs(pmaxwell(Q, a) - probs)) # Should be zero
}
}
\keyword{distribution}
diff --git a/man/mbinomial.Rd b/man/mbinomial.Rd
index 360608a..71ae2f7 100644
--- a/man/mbinomial.Rd
+++ b/man/mbinomial.Rd
@@ -8,7 +8,7 @@
}
\usage{
-mbinomial(mvar = NULL, link = "logit", earg = list(),
+mbinomial(mvar = NULL, link = "logit",
parallel = TRUE, smallno = .Machine$double.eps^(3/4))
}
%- maybe also 'usage' for other objects documented here.
@@ -21,9 +21,8 @@ mbinomial(mvar = NULL, link = "logit", earg = list(),
}
- \item{link, earg}{
- Parameter link function and extra argument for the probability
- parameter.
+ \item{link}{
+ Parameter link function for the probability parameter.
% called \eqn{p} below.
Information for these are at \code{\link{Links}}
and \code{\link{CommonVGAMffArguments}}.
@@ -141,27 +140,27 @@ mbinomial(mvar = NULL, link = "logit", earg = list(),
# Cf. Hastie and Tibshirani (1990) p.209. The variable n must be even.
# Here, the intercept for each matched set accounts for x3 which is
# the confounder or matching variable.
-n = 700 # Requires a big machine with lots of memory. Expensive wrt time
-n = 100 # This requires a reasonably big machine.
-mydat = data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each = 2))
-xmat = with(mydat, cbind(x2, x3))
-mydat = transform(mydat, eta = -0.1 + 0.2 * x2 + 0.3 * x3)
-etamat = with(mydat, matrix(eta, n/2, 2))
-condmu = exp(etamat[, 1]) / (exp(etamat[, 1]) + exp(etamat[, 2]))
-y1 = ifelse(runif(n/2) < condmu, 1, 0)
-y = cbind(y1, 1 - y1)
-mydat = transform(mydat, y = c(y1, 1-y1),
+n <- 700 # Requires a big machine with lots of memory. Expensive wrt time
+n <- 100 # This requires a reasonably big machine.
+mydat <- data.frame(x2 = rnorm(n), x3 = rep(rnorm(n/2), each = 2))
+xmat <- with(mydat, cbind(x2, x3))
+mydat <- transform(mydat, eta = -0.1 + 0.2 * x2 + 0.3 * x3)
+etamat <- with(mydat, matrix(eta, n/2, 2))
+condmu <- exp(etamat[, 1]) / (exp(etamat[, 1]) + exp(etamat[, 2]))
+y1 <- ifelse(runif(n/2) < condmu, 1, 0)
+y <- cbind(y1, 1 - y1)
+mydat <- transform(mydat, y = c(y1, 1-y1),
ID = factor(c(row(etamat))))
-fit = vglm(y ~ 1 + ID + x2, trace = TRUE,
- fam = mbinomial(mvar = ~ ID - 1), data = mydat)
+fit <- vglm(y ~ 1 + ID + x2, trace = TRUE,
+ mbinomial(mvar = ~ ID - 1), data = mydat)
dimnames(coef(fit, matrix = TRUE))
coef(fit, matrix = TRUE)
summary(fit)
head(fitted(fit))
-objsizemb = function(object) round(object.size(object) / 2^20, dig = 2)
+objsizemb <- function(object) round(object.size(object) / 2^20, dig = 2)
objsizemb(fit) # in Mb
-VLMX = model.matrix(fit, type = "vlm") # The big model matrix
+VLMX <- model.matrix(fit, type = "vlm") # The big model matrix
dim(VLMX)
objsizemb(VLMX) # in Mb
rm(VLMX) }
diff --git a/man/mccullagh89.Rd b/man/mccullagh89.Rd
index 3fe6c00..7da6c99 100644
--- a/man/mccullagh89.Rd
+++ b/man/mccullagh89.Rd
@@ -8,14 +8,13 @@
}
\usage{
-mccullagh89(ltheta = "rhobit", lnu = "logoff", itheta = NULL, inu = NULL,
- etheta = list(), enu = if(lnu == "logoff") list(offset = 0.5)
- else list(), zero = NULL)
+mccullagh89(ltheta = "rhobit", lnu = logoff(offset = 0.5),
+ itheta = NULL, inu = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{ltheta, lnu, etheta, enu}{
- Link functions and their extra arguments
+ \item{ltheta, lnu}{
+ Link functions
for the \eqn{\theta}{theta} and \eqn{\nu}{nu} parameters.
See \code{\link{Links}} for general information.
@@ -111,8 +110,8 @@ all else fails.
%}
\examples{
-mdata = data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
-fit = vglm(y ~ 1, mccullagh89, mdata, trace = TRUE)
+mdata <- data.frame(y = rnorm(n = 1000, sd = 0.2)) # Limit as theta = 0, nu = Inf
+fit <- vglm(y ~ 1, mccullagh89, mdata, trace = TRUE)
head(fitted(fit))
with(mdata, mean(y))
summary(fit)
diff --git a/man/micmen.Rd b/man/micmen.Rd
index c5064b7..443bcfc 100644
--- a/man/micmen.Rd
+++ b/man/micmen.Rd
@@ -8,12 +8,9 @@
}
\usage{
micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
- imethod = 1, oim = TRUE,
- link1 = "identity", link2 = "identity",
- firstDeriv = c("nsimEIM", "rpar"),
- earg1 = list(), earg2 = list(), prob.x = c(0.15, 0.85),
- nsimEIM = 500,
- dispersion = 0, zero = NULL)
+ imethod = 1, oim = TRUE, link1 = "identity", link2 = "identity",
+ firstDeriv = c("nsimEIM", "rpar"), probs.x = c(0.15, 0.85),
+ nsimEIM = 500, dispersion = 0, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -40,11 +37,6 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
See \code{\link{Links}} for more choices.
}
- \item{earg1, earg2}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{dispersion}{
Numerical. Dispersion parameter.
@@ -55,7 +47,7 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
The first is the default.
}
- \item{imethod, prob.x}{
+ \item{imethod, probs.x}{
See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -96,6 +88,7 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Seber, G. A. F. and Wild, C. J. (1989)
@@ -154,7 +147,7 @@ micmen(rpar = 0.001, divisor = 10, init1 = NULL, init2 = NULL,
}
\examples{
-fit = vglm(velocity ~ 1, micmen, enzyme, trace = TRUE, crit = "coef",
+fit <- vglm(velocity ~ 1, micmen, enzyme, trace = TRUE, crit = "coef",
form2 = ~ conc - 1)
summary(fit)
@@ -164,8 +157,8 @@ summary(fit)
points(fitted(fit) ~ conc, enzyme, col = "red", pch = "+", cex = 1.5)
# This predicts the response at a finer grid:
-newenzyme = data.frame(conc = seq(0, max(with(enzyme, conc)), len = 200))
-fit@extra$Xm2 = newenzyme$conc # This assignment is needed for prediction
+newenzyme <- data.frame(conc = seq(0, max(with(enzyme, conc)), len = 200))
+fit@extra$Xm2 <- newenzyme$conc # This assignment is needed for prediction
lines(predict(fit, newenzyme, "response") ~ conc, newenzyme, col = "red") }
}
\keyword{models}
diff --git a/man/mix2exp.Rd b/man/mix2exp.Rd
index f40f332..b90ccca 100644
--- a/man/mix2exp.Rd
+++ b/man/mix2exp.Rd
@@ -9,9 +9,8 @@
}
\usage{
-mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
- el1 = list(), el2 = list(), iphi = 0.5, il1 = NULL, il2 = NULL,
- qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
+mix2exp(lphi = "logit", llambda = "loge", iphi = 0.5, il1 = NULL,
+ il2 = NULL, qmu = c(0.8, 0.2), nsimEIM = 100, zero = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,11 +23,6 @@ mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
}
- \item{ephi, el1, el2}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iphi, il1, il2}{
Initial value for \eqn{\phi}{phi}, and
optional initial value for \eqn{\lambda_1}{lambda1} and
@@ -114,22 +108,22 @@ mix2exp(lphi = "logit", llambda = "loge", ephi = list(),
}
\examples{
-lambda1 = exp(1); lambda2 = exp(3)
-(phi = logit(-1, inverse = TRUE))
-mdata = data.frame(y1 = rexp(nn <- 1000, lambda1))
-mdata = transform(mdata, y2 = rexp(nn, lambda2))
-mdata = transform(mdata, Y = ifelse(runif(nn) < phi, y1, y2))
-fit = vglm(Y ~ 1, mix2exp, mdata, trace = TRUE)
+\dontrun{ lambda1 <- exp(1); lambda2 <- exp(3)
+(phi <- logit(-1, inverse = TRUE))
+mdata <- data.frame(y1 = rexp(nn <- 1000, lambda1))
+mdata <- transform(mdata, y2 = rexp(nn, lambda2))
+mdata <- transform(mdata, Y = ifelse(runif(nn) < phi, y1, y2))
+fit <- vglm(Y ~ 1, mix2exp, mdata, trace = TRUE)
coef(fit, matrix = TRUE)
# Compare the results with the truth
round(rbind('Estimated' = Coef(fit),
'Truth' = c(phi, lambda1, lambda2)), dig = 2)
-\dontrun{# Plot the results
with(mdata, hist(Y, prob = TRUE, main = "Orange = estimate, blue = truth"))
abline(v = 1 / Coef(fit)[c(2, 3)], lty = 2, col = "orange", lwd = 2)
-abline(v = 1 / c(lambda1, lambda2), lty = 2, col = "blue", lwd = 2) }
+abline(v = 1 / c(lambda1, lambda2), lty = 2, col = "blue", lwd = 2)
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/mix2normal1.Rd b/man/mix2normal1.Rd
index 0b73ea8..2d848f2 100644
--- a/man/mix2normal1.Rd
+++ b/man/mix2normal1.Rd
@@ -9,8 +9,6 @@
}
\usage{
mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
- ephi = list(), emu1 = list(), emu2 = list(),
- esd1 = list(), esd2 = list(),
iphi = 0.5, imu1 = NULL, imu2 = NULL, isd1 = NULL, isd2 = NULL,
qmu = c(0.2, 0.8), equalsd = TRUE, nsimEIM = 100, zero = 1)
}
@@ -22,23 +20,27 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
\eqn{\sigma}{sd}.
See \code{\link{Links}} for more choices.
- }
- \item{ephi, emu1, emu2, esd1, esd2}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- If \code{equalsd = TRUE} then \code{esd1} must equal \code{esd2}.
}
+
+% \item{ephi, emu1, emu2, esd1, esd2}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% If \code{equalsd = TRUE} then \code{esd1} must equal \code{esd2}.
+% }
+
\item{iphi}{
Initial value for \eqn{\phi}{phi}, whose value must lie
between 0 and 1.
+
}
\item{imu1, imu2}{
Optional initial value for \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2}.
The default is to compute initial values internally using
the argument \code{qmu}.
+
}
\item{isd1, isd2}{
Optional initial value for \eqn{\sigma_1}{sd1} and \eqn{\sigma_2}{sd2}.
@@ -47,6 +49,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
Currently these are not great, therefore using these arguments
where practical is a good idea.
+
}
\item{qmu}{
Vector with two values giving the probabilities relating to the sample
@@ -55,16 +58,19 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
The two values are fed in as the \code{probs} argument into
\code{\link[stats]{quantile}}.
+
}
\item{equalsd}{
Logical indicating whether the two standard deviations should be
constrained to be equal. If \code{TRUE} then the appropriate
constraint matrices will be used.
+
}
\item{nsimEIM}{
See \code{\link{CommonVGAMffArguments}}.
+
}
\item{zero}{
An integer specifying which linear/additive predictor is modelled as
@@ -76,6 +82,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
functions of the explanatory variables.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -103,6 +110,7 @@ mix2normal1(lphi = "logit", lmu = "identity", lsd = "loge",
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
@@ -169,32 +177,35 @@ London: Chapman & Hall.
\code{\link[stats:Normal]{Normal}},
\code{\link{mix2poisson}}.
+
}
\examples{
-mu1 = 99; mu2 = 150; nn = 1000
-sd1 = sd2 = exp(3)
-(phi = logit(-1, inverse = TRUE))
-mdata = data.frame(y = ifelse(runif(nn) < phi, rnorm(nn, mu1, sd1),
- rnorm(nn, mu2, sd2)))
-fit = vglm(y ~ 1, mix2normal1(equalsd = TRUE), mdata)
+\dontrun{ mu1 <- 99; mu2 <- 150; nn <- 1000
+sd1 <- sd2 <- exp(3)
+(phi <- logit(-1, inverse = TRUE))
+mdata <- data.frame(y = ifelse(runif(nn) < phi, rnorm(nn, mu1, sd1),
+ rnorm(nn, mu2, sd2)))
+fit <- vglm(y ~ 1, mix2normal1(equalsd = TRUE), mdata)
# Compare the results
-cfit = coef(fit)
+cfit <- coef(fit)
round(rbind('Estimated' = c(logit(cfit[1], inverse = TRUE),
- cfit[2], exp(cfit[3]), cfit[4]), 'Truth' = c(phi, mu1, sd1, mu2)), dig = 2)
+ cfit[2], exp(cfit[3]), cfit[4]),
+ 'Truth' = c(phi, mu1, sd1, mu2)), dig = 2)
-\dontrun{# Plot the results
-xx = with(mdata, seq(min(y), max(y), len = 200))
+# Plot the results
+xx <- with(mdata, seq(min(y), max(y), len = 200))
plot(xx, (1-phi)*dnorm(xx, mu2, sd2), type = "l", xlab = "y",
- main = "Orange=estimate, blue=truth", col = "blue", ylab = "Density")
-phi.est = logit(coef(fit)[1], inverse = TRUE)
-sd.est = exp(coef(fit)[3])
+ main = "Orange = estimate, blue = truth", col = "blue", ylab = "Density")
+phi.est <- logit(coef(fit)[1], inverse = TRUE)
+sd.est <- exp(coef(fit)[3])
lines(xx, phi*dnorm(xx, mu1, sd1), col = "blue")
lines(xx, phi.est * dnorm(xx, Coef(fit)[2], sd.est), col = "orange")
lines(xx, (1-phi.est) * dnorm(xx, Coef(fit)[4], sd.est), col = "orange")
abline(v = Coef(fit)[c(2,4)], lty = 2, col = "orange")
-abline(v = c(mu1, mu2), lty = 2, col = "blue") }
+abline(v = c(mu1, mu2), lty = 2, col = "blue")
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/mix2poisson.Rd b/man/mix2poisson.Rd
index 473c390..2671cf5 100644
--- a/man/mix2poisson.Rd
+++ b/man/mix2poisson.Rd
@@ -9,7 +9,6 @@
}
\usage{
mix2poisson(lphi = "logit", llambda = "loge",
- ephi = list(), el1 = list(), el2 = list(),
iphi = 0.5, il1 = NULL, il2 = NULL,
qmu = c(0.2, 0.8), nsimEIM = 100, zero = 1)
}
@@ -21,11 +20,13 @@ mix2poisson(lphi = "logit", llambda = "loge",
See \code{\link{Links}} for more choices.
}
- \item{ephi, el1, el2}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{ephi, el1, el2}{
+% ephi = list(), el1 = list(), el2 = list(),
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
\item{iphi}{
Initial value for \eqn{\phi}{phi}, whose value must lie
between 0 and 1.
@@ -36,6 +37,8 @@ mix2poisson(lphi = "logit", llambda = "loge",
\eqn{\lambda_2}{lambda2}. These values must be positive.
The default is to compute initial values internally using
the argument \code{qmu}.
+% If these arguments are supplied then practical experience
+% suggests they should be quite well-separated.
}
\item{qmu}{
@@ -90,6 +93,7 @@ mix2poisson(lphi = "logit", llambda = "loge",
see the example below.
+
}
\author{ T. W. Yee }
@@ -119,39 +123,39 @@ mix2poisson(lphi = "logit", llambda = "loge",
}
\examples{
-# Example 1: simulated data
-nn = 1000
-mu1 = exp(2.5) # also known as lambda1
-mu2 = exp(3)
-(phi = logit(-0.5, inverse = TRUE))
-mdata = data.frame(y = ifelse(runif(nn) < phi, rpois(nn, mu1), rpois(nn, mu2)))
-fit = vglm(y ~ 1, mix2poisson, mdata)
+\dontrun{ # Example 1: simulated data
+nn <- 1000
+mu1 <- exp(2.5) # also known as lambda1
+mu2 <- exp(3)
+(phi <- logit(-0.5, inverse = TRUE))
+mdata <- data.frame(y = rpois(nn, ifelse(runif(nn) < phi, mu1, mu2)))
+fit <- vglm(y ~ 1, mix2poisson, mdata)
coef(fit, matrix = TRUE)
# Compare the results with the truth
round(rbind('Estimated' = Coef(fit), 'Truth' = c(phi, mu1, mu2)), dig = 2)
-\dontrun{# Plot the results
-ty = with(mdata, table(y))
+ty <- with(mdata, table(y))
plot(names(ty), ty, type = "h", main = "Orange=estimate, blue=truth",
ylab = "Frequency", xlab = "y")
abline(v = Coef(fit)[-1], lty = 2, col = "orange", lwd = 2)
-abline(v = c(mu1, mu2), lty = 2, col = "blue", lwd = 2) }
+abline(v = c(mu1, mu2), lty = 2, col = "blue", lwd = 2)
# Example 2: London Times data (Lange, 1997, p.31)
-ltdata1 = data.frame(deaths = 0:9,
- freq = c(162, 267, 271, 185, 111, 61, 27, 8, 3, 1))
-ltdata2 = data.frame(y = with(ltdata1, rep(deaths, freq)))
+ltdata1 <- data.frame(deaths = 0:9,
+ freq = c(162, 267, 271, 185, 111, 61, 27, 8, 3, 1))
+ltdata2 <- data.frame(y = with(ltdata1, rep(deaths, freq)))
# Usually this does not work well unless nsimEIM is large
-fit = vglm(deaths ~ 1, weight = freq, data = ltdata1,
- mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5, nsimEIM = 5000))
+fit <- vglm(deaths ~ 1, weight = freq, data = ltdata1,
+ mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5, nsimEIM = 5000))
# This works better in general
-fit = vglm(y ~ 1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5), ltdata2)
+fit <- vglm(y ~ 1, mix2poisson(iphi = 0.3, il1 = 1, il2 = 2.5), ltdata2)
coef(fit, matrix = TRUE)
Coef(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/mlogit.Rd b/man/mlogit.Rd
new file mode 100644
index 0000000..4eeb437
--- /dev/null
+++ b/man/mlogit.Rd
@@ -0,0 +1,107 @@
+\name{mlogit}
+\alias{mlogit}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Multinomial Logit Link Function }
+\description{
+ Computes the mlogit transformation, including its inverse and the
+ first two derivatives.
+
+}
+\usage{
+mlogit(theta, refLevel = "last", M = NULL, whitespace = FALSE,
+ bvalue = NULL, inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{theta}{
+ Numeric or character.
+ See below for further details.
+
+ }
+ \item{refLevel, M, whitespace}{
+ See \code{\link{multinomial}}.
+
+
+ }
+ \item{bvalue}{
+ See \code{\link{Links}}.
+
+
+ }
+
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
+
+ }
+
+}
+\details{
+ The \code{mlogit()} link function is a generalization of the
+ \code{\link{logit}} link to \eqn{M} levels/classes.
+ It forms the basis of the \code{\link{multinomial}} logit model.
+
+
+}
+\value{
+ For \code{mlogit} with \code{deriv = 0}, the mlogit of \code{theta}, i.e.,
+ \code{log(theta[,j]/theta[,M+1])} when \code{inverse = FALSE},
+ and if \code{inverse = TRUE} then
+ \code{exp(theta[,j])/(1+rowSums(exp(theta)))}.
+
+
+ For \code{deriv = 1}, then the function returns
+ \emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
+ if \code{inverse = FALSE},
+ else if \code{inverse = TRUE} then it returns the reciprocal.
+
+
+ Here, all logarithms are natural logarithms, i.e., to base \emph{e}.
+
+
+}
+\references{
+ McCullagh, P. and Nelder, J. A. (1989)
+ \emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
+}
+\author{ Thomas W. Yee }
+
+\note{
+ Numerical instability may occur when \code{theta} is
+ close to 1 or 0 (for \code{mlogit}).
+ One way of overcoming this is to use, e.g., \code{bvalue}.
+
+
+}
+
+\seealso{
+ \code{\link{Links}},
+ \code{\link{multinomial}},
+ \code{\link{logit}}.
+
+
+ }
+\examples{
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ let,
+ multinomial, trace = TRUE, pneumo) # For illustration only
+fitted(fit)
+predict(fit)
+
+mlogit(predict(fit))
+mlogit(predict(fit), refLevel = 1) # For illustration only
+mlogit(predict(fit)) - fitted(fit) # Should be all 0s
+
+mlogit(fitted(fit), inverse = TRUE)
+mlogit(fitted(fit), inverse = TRUE) - predict(fit) # Should be all 0s
+
+mlogit(fitted(fit), deriv = 1)
+mlogit(fitted(fit), deriv = 2)
+}
+\keyword{math}
+\keyword{models}
+\keyword{regression}
+
diff --git a/man/model.framevlm.Rd b/man/model.framevlm.Rd
index 1b00dfb..dd6d074 100644
--- a/man/model.framevlm.Rd
+++ b/man/model.framevlm.Rd
@@ -12,12 +12,15 @@ model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
\item{\dots}{further arguments such as \code{data}, \code{na.action},
\code{subset}.
See \code{\link[stats]{model.frame}} for more information on these.
+
+
}
\item{setupsmart, wrapupsmart}{
Logical.
Arguments to determine whether to use smart prediction.
+
}
}
@@ -25,6 +28,8 @@ model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
This function returns a \code{\link{data.frame}} with the variables.
It is applied to an object which inherits from class \code{"vlm"} (e.g.,
a fitted model of class \code{"vglm"}).
+
+
}
\details{Since \code{object} is
an object which inherits from class \code{"vlm"} (e.g.,
@@ -34,10 +39,12 @@ model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
\code{model = TRUE}) or pass the call used when fitting on to the
default method.
+
This code implements \emph{smart prediction}
(see \code{\link{smartpred}}).
+
}
\value{
A \code{\link{data.frame}} containing the variables used in
@@ -63,22 +70,21 @@ model.framevlm(object, setupsmart = TRUE, wrapupsmart = TRUE, \dots)
}
\examples{
# Illustrates smart prediction
-pneumo = transform(pneumo, let = log(exposure.time))
-fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
- fam = multinomial,
- data = pneumo, trace = TRUE, x = FALSE)
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
+ multinomial, data = pneumo, trace = TRUE, x = FALSE)
class(fit)
-check1 = head(model.frame(fit))
+check1 <- head(model.frame(fit))
check1
-check2 = model.frame(fit, data = head(pneumo))
+check2 <- model.frame(fit, data = head(pneumo))
check2
all.equal(unlist(check1), unlist(check2)) # Should be TRUE
-q0 = head(predict(fit))
-q1 = head(predict(fit, newdata = pneumo))
-q2 = predict(fit, newdata = head(pneumo))
-all.equal(q0, q1) # Should be TRUE
-all.equal(q1, q2) # Should be TRUE
+q0 <- head(predict(fit))
+q1 <- head(predict(fit, newdata = pneumo))
+q2 <- predict(fit, newdata = head(pneumo))
+all.equal(q0, q1) # Should be TRUE
+all.equal(q1, q2) # Should be TRUE
}
\keyword{models}
diff --git a/man/model.matrixvlm.Rd b/man/model.matrixvlm.Rd
index 30c8a20..a219c9e 100644
--- a/man/model.matrixvlm.Rd
+++ b/man/model.matrixvlm.Rd
@@ -8,6 +8,7 @@ model.matrixvlm(object, type = c("vlm", "lm", "lm2", "bothlmlm2"),
\arguments{
\item{object}{an object of a class that inherits from the
\emph{vector linear model} (VLM).
+
}
\item{type}{Type of design matrix returned. The first is the default.
The value \code{"vlm"} is the VLM model matrix corresponding
@@ -93,23 +94,22 @@ Reduced-rank vector generalized linear models.
}
\examples{
# Illustrates smart prediction
-pneumo = transform(pneumo, let = log(exposure.time))
-fit = vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2),
- family = multinomial,
- data = pneumo, trace = TRUE, x = FALSE)
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2),
+ multinomial, data = pneumo, trace = TRUE, x = FALSE)
class(fit)
fit@x # Not saved on the object
model.matrix(fit)
model.matrix(fit, lapred.index = 1, type = "lm")
model.matrix(fit, lapred.index = 2, type = "lm")
-(Check1 = head(model.matrix(fit, type = "lm")))
-(Check2 = model.matrix(fit, data = head(pneumo), type = "lm"))
+(Check1 <- head(model.matrix(fit, type = "lm")))
+(Check2 <- model.matrix(fit, data = head(pneumo), type = "lm"))
all.equal(c(Check1), c(Check2))
-q0 = head(predict(fit))
-q1 = head(predict(fit, newdata = pneumo))
-q2 = predict(fit, newdata = head(pneumo))
+q0 <- head(predict(fit))
+q1 <- head(predict(fit, newdata = pneumo))
+q2 <- predict(fit, newdata = head(pneumo))
all.equal(q0, q1) # Should be TRUE
all.equal(q1, q2) # Should be TRUE
}
diff --git a/man/moffset.Rd b/man/moffset.Rd
index 9c2fa43..5ae1590 100644
--- a/man/moffset.Rd
+++ b/man/moffset.Rd
@@ -60,10 +60,10 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
considering a daily effect.
- This is a data preprocessing function for \code{\link{rcam}}
- and \code{\link{plotrcam0}}. The differences between
- \code{\link{Rcam}} and \code{\link{moffset}} is that
- \code{\link{Rcam}} only reorders the level of the rows and columns
+ This is a data preprocessing function for \code{\link{rcim}}
+ and \code{\link{plotrcim0}}. The differences between
+ \code{\link{Rcim}} and \code{\link{moffset}} is that
+ \code{\link{Rcim}} only reorders the level of the rows and columns
so that the data is shifted but not moved.
That is, a value in one row stays in that row,
and ditto for column.
@@ -95,23 +95,23 @@ moffset(mat, roffset = 0, coffset = 0, postfix = "")
}
\seealso{
- \code{\link{Rcam}},
- \code{\link{rcam}},
- \code{\link{plotrcam0}},
+ \code{\link{Rcim}},
+ \code{\link{rcim}},
+ \code{\link{plotrcim0}},
\code{\link{alcoff}},
\code{\link{crashi}}.
}
\examples{
moffset(alcoff, 3, 2, "*") # Some day's data is moved to previous day.
-Rcam(alcoff, 3 + 1, 2 + 1) # Data does not move as much.
+Rcim(alcoff, 3 + 1, 2 + 1) # Data does not move as much.
alcoff # Original data
-moffset(alcoff, 3, 2, "*") - Rcam(alcoff, 3+1, 2+1) # Note the differences
+moffset(alcoff, 3, 2, "*") - Rcim(alcoff, 3+1, 2+1) # Note the differences
# An 'effective day' data set:
alcoff.e <- moffset(alcoff, roffset = "6", postfix = "*")
-fit.o <- rcam(alcoff) # default baselines are first row and col
-fit.e <- rcam(alcoff.e) # default baselines are first row and col
+fit.o <- rcim(alcoff) # default baselines are first row and col
+fit.e <- rcim(alcoff.e) # default baselines are first row and col
\dontrun{ par(mfrow = c(2, 2), mar = c(9,4,2,1))
plot(fit.o, rsub = "Not very interpretable", csub = "Not very interpretable")
@@ -124,8 +124,8 @@ moffset(alcoff, 1, 1, "*")
moffset(alcoff, 2, 3, "*")
moffset(alcoff, 1, 0, "*")
moffset(alcoff, 0, 1, "*")
-moffset(alcoff, "6", "Monday", "*") # This one is good
+moffset(alcoff, "6", "Mon", "*") # This one is good
# Customise row and column baselines
-fit2 <- rcam(Rcam(alcoff.e, rbaseline = "11", cbaseline = "Monday*"))
+fit2 <- rcim(Rcim(alcoff.e, rbaseline = "11", cbaseline = "Mon*"))
}
diff --git a/man/morgenstern.Rd b/man/morgenstern.Rd
index 3d63f78..b1d6390 100644
--- a/man/morgenstern.Rd
+++ b/man/morgenstern.Rd
@@ -8,19 +8,18 @@
}
\usage{
-morgenstern(lapar = "rhobit", earg = list(), iapar = NULL, tola0 = 0.01,
- imethod = 1)
+morgenstern(lapar = "rhobit", iapar = NULL, tola0 = 0.01, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lapar, earg}{
- Link function
- and extra argument for the
+ \item{lapar}{
+ Link function for the
association parameter
\eqn{\alpha}{alpha}, which lies between \eqn{-1} and \eqn{1}.
See \code{\link{Links}} for more choices
and other information.
+
}
\item{iapar}{
Numeric. Optional initial value for \eqn{\alpha}{alpha}.
@@ -107,11 +106,11 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-N = 1000; mdata = data.frame(y1 = rexp(N), y2 = rexp(N))
+N <- 1000; mdata <- data.frame(y1 = rexp(N), y2 = rexp(N))
\dontrun{plot(ymat)}
-fit = vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE)
+fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE)
# This may fail:
-fit = vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE, crit = "coef")
+fit <- vglm(cbind(y1, y2) ~ 1, morgenstern, mdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
head(fitted(fit))
diff --git a/man/multinomial.Rd b/man/multinomial.Rd
index 24c63d8..09c79d1 100644
--- a/man/multinomial.Rd
+++ b/man/multinomial.Rd
@@ -84,6 +84,7 @@ multinomial(zero = NULL, parallel = FALSE, nointercept = NULL,
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
@@ -107,19 +108,19 @@ London: Chapman & Hall.
Agresti, A. (2002)
\emph{Categorical Data Analysis},
-2nd ed. New York: Wiley.
+2nd ed. New York, USA: Wiley.
Hastie, T. J., Tibshirani, R. J. and Friedman, J. H. (2009)
\emph{The Elements of Statistical Learning: Data Mining,
Inference and Prediction},
2nd ed.
-New York: Springer-Verlag.
+New York, USA: Springer-Verlag.
Simonoff, J. S. (2003)
\emph{Analyzing Categorical Data},
-New York: Springer-Verlag.
+New York, USA: Springer-Verlag.
Anderson, J. A. (1984)
@@ -128,6 +129,12 @@ Regression and ordered categorical variables.
\bold{46}, 1--30.
+Tutz, G. (2012)
+\emph{Regression for Categorical Data},
+Cambridge University Press.
+
+
+
Further information and examples on categorical data analysis
by the \pkg{VGAM} package can be found at
\url{http://www.stat.auckland.ac.nz/~yee/VGAM/doc/categorical.pdf}.
@@ -212,23 +219,26 @@ by the \pkg{VGAM} package can be found at
\code{\link{rrvglm}},
\code{\link{fill1}},
\code{\link[stats:Multinom]{Multinomial}},
+ \code{\link{mlogit}},
\code{\link[datasets]{iris}}.
The author's homepage has further documentation about
categorical data analysis using \pkg{VGAM}.
+
}
% \code{\link[base:Multinom]{rmultinom}}
+
\examples{
# Example 1: fit a multinomial logit model to Edgar Anderson's iris data
data(iris)
-\dontrun{ fit = vglm(Species ~ ., multinomial, iris)
+\dontrun{ fit <- vglm(Species ~ ., multinomial, iris)
coef(fit, matrix = TRUE) }
# Example 2a: a simple example
-ycounts = t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8))) # Counts
-fit = vglm(ycounts ~ 1, multinomial)
+ycounts <- t(rmultinom(10, size = 20, prob = c(0.1, 0.2, 0.8))) # Counts
+fit <- vglm(ycounts ~ 1, multinomial)
head(fitted(fit)) # Proportions
fit@prior.weights # NOT recommended for extraction of prior weights
weights(fit, type = "prior", matrix = FALSE) # The better method
@@ -236,19 +246,19 @@ depvar(fit) # Sample proportions; same as fit@y
constraints(fit) # Constraint matrices
# Example 2b: Different reference level used as the baseline
-fit2 = vglm(ycounts ~ 1, multinomial(refLevel = 2))
+fit2 <- vglm(ycounts ~ 1, multinomial(refLevel = 2))
coef(fit2, matrix = TRUE)
coef(fit , matrix = TRUE) # Easy to reconcile this output with fit2
# Example 3: The response is a factor.
-nn = 10
-dframe3 = data.frame(yfactor = gl(3, nn, labels = c("Control", "Trt1", "Trt2")),
+nn <- 10
+dframe3 <- data.frame(yfactor = gl(3, nn, labels = c("Control", "Trt1", "Trt2")),
x2 = runif(3 * nn))
-myrefLevel = with(dframe3, yfactor[12])
-fit3a = vglm(yfactor ~ x2, multinomial(refLevel = myrefLevel), dframe3)
-fit3b = vglm(yfactor ~ x2, multinomial(refLevel = 2), dframe3)
+myrefLevel <- with(dframe3, yfactor[12])
+fit3a <- vglm(yfactor ~ x2, multinomial(refLevel = myrefLevel), dframe3)
+fit3b <- vglm(yfactor ~ x2, multinomial(refLevel = 2), dframe3)
coef(fit3a, matrix = TRUE) # "Treatment1" is the reference level
coef(fit3b, matrix = TRUE) # "Treatment1" is the reference level
margeff(fit3b)
@@ -256,7 +266,7 @@ margeff(fit3b)
# Example 4: Fit a rank-1 stereotype model
data(car.all)
-fit4 = rrvglm(Country ~ Width + Height + HP, multinomial, car.all)
+fit4 <- rrvglm(Country ~ Width + Height + HP, multinomial, car.all)
coef(fit4) # Contains the C matrix
constraints(fit4)$HP # The A matrix
coef(fit4, matrix = TRUE) # The B matrix
@@ -268,9 +278,9 @@ svd(coef(fit4, matrix = TRUE)[-1, ])$d # This has rank 1; = C %*% t(A)
# Example 5: The use of the xij argument (aka conditional logit model)
set.seed(111)
-nn = 100 # Number of people who travel to work
-M = 3 # There are M+1 models of transport to go to work
-ycounts = matrix(0, nn, M+1)
+nn <- 100 # Number of people who travel to work
+M <- 3 # There are M+1 models of transport to go to work
+ycounts <- matrix(0, nn, M+1)
ycounts[cbind(1:nn, sample(x = M+1, size = nn, replace = TRUE))] = 1
dimnames(ycounts) = list(NULL, c("bus","train","car","walk"))
gotowork = data.frame(cost.bus = runif(nn), time.bus = runif(nn),
@@ -287,13 +297,13 @@ gotowork = transform(gotowork,
Time.car = time.car - time.walk,
Time.train = time.train - time.walk,
Time = time.train - time.walk) # for labelling
-fit = vglm(ycounts ~ Cost + Time,
- multinomial(parall = TRUE ~ Cost + Time - 1),
- xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
- Time ~ Time.bus + Time.train + Time.car),
- form2 = ~ Cost + Cost.bus + Cost.train + Cost.car +
- Time + Time.bus + Time.train + Time.car,
- data=gotowork, trace = TRUE)
+fit <- vglm(ycounts ~ Cost + Time,
+ multinomial(parall = TRUE ~ Cost + Time - 1),
+ xij = list(Cost ~ Cost.bus + Cost.train + Cost.car,
+ Time ~ Time.bus + Time.train + Time.car),
+ form2 = ~ Cost + Cost.bus + Cost.train + Cost.car +
+ Time + Time.bus + Time.train + Time.car,
+ data = gotowork, trace = TRUE)
head(model.matrix(fit, type = "lm")) # LM model matrix
head(model.matrix(fit, type = "vlm")) # Big VLM model matrix
coef(fit)
@@ -309,9 +319,9 @@ max(abs(predict(fit) - predict(fit, new = gotowork))) # Should be 0
% 20100915; this no longer works:
% # Example 2c: Different input to Example 2a but same result
-% w = apply(ycounts, 1, sum) # Prior weights
-% yprop = ycounts / w # Sample proportions
-% fitprop = vglm(yprop ~ 1, multinomial, weights=w)
+% w <- apply(ycounts, 1, sum) # Prior weights
+% yprop <- ycounts / w # Sample proportions
+% fitprop <- vglm(yprop ~ 1, multinomial, weights=w)
% head(fitted(fitprop)) # Proportions
% weights(fitprop, type="prior", matrix=FALSE)
% fitprop at y # Same as fitprop@y, the input
diff --git a/man/nakagami.Rd b/man/nakagami.Rd
index 61beee0..266328e 100644
--- a/man/nakagami.Rd
+++ b/man/nakagami.Rd
@@ -8,15 +8,12 @@
}
\usage{
-nakagami(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale = 1)
+nakagami(lshape = "loge", lscale = "loge", ishape = NULL, iscale = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lshape, lscale, eshape, escale}{
- Parameter link functions and extra arguments
- applied to the
+ \item{lshape, lscale}{
+ Parameter link functions applied to the
\emph{shape} and \emph{scale} parameters.
Log links ensure they are positive.
See \code{\link{Links}} for more choices
@@ -95,15 +92,15 @@ nakagami(lshape = "loge", lscale = "loge",
}
\examples{
-nn = 1000; shape = exp(0); Scale = exp(1)
-ndata = data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape)))
-fit = vglm(y1 ~ 1, nakagami, ndata, trace = TRUE, crit = "c")
-ndata = transform(ndata, y2 = rnaka(nn, shape = shape, scale = Scale))
-fit = vglm(y2 ~ 1, nakagami(iscale = 3), ndata, trace = TRUE)
+nn <- 1000; shape <- exp(0); Scale <- exp(1)
+ndata <- data.frame(y1 = sqrt(rgamma(nn, shape = shape, scale = Scale/shape)))
+fit <- vglm(y1 ~ 1, nakagami, ndata, trace = TRUE, crit = "c")
+ndata <- transform(ndata, y2 = rnaka(nn, shape = shape, scale = Scale))
+fit <- vglm(y2 ~ 1, nakagami(iscale = 3), ndata, trace = TRUE)
head(fitted(fit))
with(ndata, mean(y2))
coef(fit, matrix = TRUE)
-(Cfit = Coef(fit))
+(Cfit <- Coef(fit))
\dontrun{ with(ndata,
hist(sy <- sort(y2), prob = TRUE, main = "", xlab = "y", ylim = c(0, 0.6)))
lines(dnaka(sy, shape = Cfit[1], scale = Cfit[2]) ~ sy, ndata, col = "orange") }
diff --git a/man/nakagamiUC.Rd b/man/nakagamiUC.Rd
index 88666b8..72d5fba 100644
--- a/man/nakagamiUC.Rd
+++ b/man/nakagamiUC.Rd
@@ -67,7 +67,7 @@ rnaka(n, shape, scale = 1, Smallno = 1.0e-6)
}
\examples{
-\dontrun{ x = seq(0, 3.2, len = 200)
+\dontrun{ x <- seq(0, 3.2, len = 200)
plot(x, dgamma(x, shape = 1), type = "n", col = "black", ylab = "",
ylim = c(0,1.5), main = "dnaka(x, shape)")
lines(x, dnaka(x, shape = 1), col = "orange")
@@ -84,8 +84,8 @@ lines(x, pnaka(x, shape = 3), col = "green")
legend(2, 0.6, col = c("orange","blue","green"), lty = rep(1, len = 3),
legend = paste("shape =", c(1, 2, 3))) }
-probs = seq(0.1, 0.9, by = 0.1)
-pnaka(qnaka(p = probs, shape = 2), shape = 2) - probs # Should be all 0
+probs <- seq(0.1, 0.9, by = 0.1)
+pnaka(qnaka(p = probs, shape = 2), shape = 2) - probs # Should be all 0
}
\keyword{distribution}
diff --git a/man/nbcanlink.Rd b/man/nbcanlink.Rd
index 5512601..5602550 100644
--- a/man/nbcanlink.Rd
+++ b/man/nbcanlink.Rd
@@ -8,8 +8,8 @@
}
\usage{
-nbcanlink(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+nbcanlink(theta, size = NULL, wrt.eta = NULL, bvalue = NULL,
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,32 +20,29 @@ nbcanlink(theta, earg = list(), inverse = FALSE, deriv = 0,
}
- \item{earg}{
- List.
- Extra argument for passing in additional information.
- Here, a \code{size} component contains the \eqn{k} matrix which
+ \item{size, wrt.eta}{
+ \code{size} contains the \eqn{k} matrix which
must be of a conformable dimension as \code{theta}.
- Also, if \code{deriv > 0} then a \code{wrt.eta} component
- which is either 1 or 2 (1 for with respect to the first
+ Also, if \code{deriv > 0} then \code{wrt.eta}
+ is either 1 or 2 (1 for with respect to the first
linear predictor, and 2 for with respect to the second
linear predictor (a function of \eqn{k})).
}
- \item{inverse}{ Logical. If \code{TRUE} the inverse function is computed. }
- \item{deriv}{ Order of the derivative. Integer with value 0, 1 or 2. }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+
+
+ \item{bvalue}{
+ Details at \code{\link{Links}}.
+
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
}
+
}
\details{
The negative binomial (NB) canonical link is
@@ -113,12 +110,10 @@ nbcanlink(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical instability may occur when \code{theta} is close to 0 or 1.
- For the \code{earg} argument,
- values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
+ Values of \code{theta} which are less than or equal to 0 can be
+ replaced by \code{bvalue}
before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ See \code{\link{Links}}.
@@ -133,27 +128,27 @@ nbcanlink(theta, earg = list(), inverse = FALSE, deriv = 0,
\examples{
nbcanlink("mu", short = FALSE)
-mymu = 1:10 # Test some basic operations:
-kmatrix = matrix(runif(length(mymu)), length(mymu), 1)
-eta1 = nbcanlink(mymu, earg = list(size = kmatrix))
-ans2 = nbcanlink(eta1, earg = list(size = kmatrix), inverse = TRUE)
+mymu <- 1:10 # Test some basic operations:
+kmatrix <- matrix(runif(length(mymu)), length(mymu), 1)
+eta1 <- nbcanlink(mymu, size = kmatrix)
+ans2 <- nbcanlink(eta1, size = kmatrix, inverse = TRUE)
max(abs(ans2 - mymu)) # Should be 0
-\dontrun{ mymu = c(seq(0.5, 10, length = 101))
-kmatrix = matrix(10, length(mymu), 1)
-plot(nbcanlink(mymu, earg = list(size = kmatrix)) ~ mymu, las = 1,
- type = "l", col = "blue", lwd = 1.5, xlab = expression({mu})) }
+\dontrun{ mymu <- c(seq(0.5, 10, length = 101))
+kmatrix <- matrix(10, length(mymu), 1)
+plot(nbcanlink(mymu, size = kmatrix) ~ mymu, las = 1,
+ type = "l", col = "blue", lwd = 1.5, xlab = expression({mu}))
# Estimate the parameters from some simulated data (see Warning section)
set.seed(123)
ndata <- data.frame(x2 = runif(nn <- 1000 ))
-size1 = exp(1); size2 = exp(2)
+size1 <- exp(1); size2 <- exp(2)
ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 < 0
size1 = size1,
size2 = size2)
ndata <- transform(ndata,
- mu1 = nbcanlink(eta1, earg = list(size = size1), inv = TRUE),
- mu2 = nbcanlink(eta1, earg = list(size = size2), inv = TRUE))
+ mu1 = nbcanlink(eta1, size = size1, inv = TRUE),
+ mu2 = nbcanlink(eta1, size = size2, inv = TRUE))
ndata <- transform(ndata, y1 = rnbinom(nn, mu = mu1, size = size1),
y2 = rnbinom(nn, mu = mu2, size = size2))
head(ndata)
@@ -165,6 +160,7 @@ fit <- vglm(cbind(y1, y2) ~ x2, negbinomial("nbcanlink", imethod = 3),
coef(fit, matrix = TRUE)
summary(fit)
}
+}
\keyword{math}
\keyword{models}
\keyword{regression}
diff --git a/man/nbolf.Rd b/man/nbolf.Rd
index 290c5f0..0570542 100644
--- a/man/nbolf.Rd
+++ b/man/nbolf.Rd
@@ -8,7 +8,7 @@
}
\usage{
-nbolf(theta, earg = stop("argument 'earg' must be given"),
+nbolf(theta, cutpoint = NULL, k = NULL,
inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,10 +18,8 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
See below for further details.
}
- \item{earg}{
- Extra argument for passing in additional information.
- This must be list with components \code{cutpoint}
- and \code{k}. Here, \code{k} is the \eqn{k} parameter associated
+ \item{cutpoint, k}{
+ Here, \code{k} is the \eqn{k} parameter associated
with the negative binomial distribution; see
\code{\link{negbinomial}}.
The cutpoints should be non-negative integers.
@@ -31,29 +29,13 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
-
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
-
-
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
-
- }
}
\details{
The negative binomial-ordinal link function (NBOLF) can be applied to
@@ -63,10 +45,6 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
distribution.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
-
-
See \code{\link{Links}} for general information about \pkg{VGAM}
link functions.
@@ -120,14 +98,13 @@ nbolf(theta, earg = stop("argument 'earg' must be given"),
}
\examples{
-earg = list(cutpoint = 2, k = 1)
-nbolf("p", earg = earg, short = FALSE)
-nbolf("p", earg = earg, tag = TRUE)
+nbolf("p", cutpoint = 2, k = 1, short = FALSE)
+nbolf("p", cutpoint = 2, k = 1, tag = TRUE)
-p = seq(0.02, 0.98, by = 0.01)
-y = nbolf(p, earg = earg)
-y. = nbolf(p, earg = earg, deriv = 1)
-max(abs(nbolf(y, earg = earg, inv = TRUE) - p)) # Should be 0
+p <- seq(0.02, 0.98, by = 0.01)
+y <- nbolf(p,cutpoint = 2, k = 1)
+y. <- nbolf(p,cutpoint = 2, k = 1, deriv = 1)
+max(abs(nbolf(y,cutpoint = 2, k = 1, inv = TRUE) - p)) # Should be 0
\dontrun{ par(mfrow = c(2, 1), las = 1)
plot(p, y, type = "l", col = "blue", main = "nbolf()")
@@ -137,27 +114,27 @@ plot(p, y., type = "l", col = "blue",
main = "(Reciprocal of) first NBOLF derivative") }
# Another example
-nn = 1000
-x2 = sort(runif(nn))
-x3 = runif(nn)
-mymu = exp( 3 + 1 * x2 - 2 * x3)
-k = 4
-y1 = rnbinom(nn, mu = mymu, size = k)
-cutpoints = c(-Inf, 10, 20, Inf)
-cuty = Cut(y1, breaks = cutpoints)
+nn <- 1000
+x2 <- sort(runif(nn))
+x3 <- runif(nn)
+mymu <- exp( 3 + 1 * x2 - 2 * x3)
+k <- 4
+y1 <- rnbinom(nn, mu = mymu, size = k)
+cutpoints <- c(-Inf, 10, 20, Inf)
+cuty <- Cut(y1, breaks = cutpoints)
\dontrun{ plot(x2, x3, col = cuty, pch = as.character(cuty)) }
table(cuty) / sum(table(cuty))
-fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "nbolf",
- reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
- mv = TRUE, earg = list(cutpoint = cutpoints[2:3], k = k)),
- trace = TRUE)
-head(fit at y)
+fit <- vglm(cuty ~ x2 + x3, cumulative(reverse = TRUE,
+ parallel = TRUE, intercept.apply = TRUE,
+ link = nbolf(cutpoint = cutpoints[2:3], k = k),
+ mv = TRUE), trace = TRUE)
+head(depvar(fit))
head(fitted(fit))
head(predict(fit))
coef(fit)
coef(fit, matrix = TRUE)
constraints(fit)
-fit at misc$earg
+fit at misc
}
\keyword{math}
\keyword{models}
diff --git a/man/negbinomial.Rd b/man/negbinomial.Rd
index fface96..8b5c33f 100644
--- a/man/negbinomial.Rd
+++ b/man/negbinomial.Rd
@@ -9,13 +9,13 @@
}
\usage{
-negbinomial(lmu = "loge", lsize = "loge", emu = list(), esize = list(),
- imu = NULL, isize = NULL, quantile.probs = 0.75,
+negbinomial(lmu = "loge", lsize = "loge",
+ imu = NULL, isize = NULL, probs.y = 0.75,
nsimEIM = 100, cutoff = 0.995,
Maxiter = 5000, deviance.arg = FALSE, imethod = 1,
parallel = FALSE, shrinkage.init = 0.95, zero = -2)
-polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
- iprob = NULL, isize = NULL, quantile.probs = 0.75, nsimEIM = 100,
+polya(lprob = "logit", lsize = "loge",
+ iprob = NULL, isize = NULL, probs.y = 0.75, nsimEIM = 100,
deviance.arg = FALSE, imethod = 1, shrinkage.init = 0.95, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
@@ -32,10 +32,6 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
\code{\link{nloge}} and
\code{\link{reciprocal}}.
- }
- \item{emu, esize, eprob}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{imu, isize, iprob}{
@@ -48,12 +44,14 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
The last argument is ignored if used within \code{\link{cqo}}; see
the \code{iKvector} argument of \code{\link{qrrvglm.control}} instead.
+
}
- \item{quantile.probs}{
+ \item{probs.y}{
Passed into the \code{probs} argument
of \code{\link[stats:quantile]{quantile}}
when \code{imethod = 3} to obtain an initial value for the mean.
+
}
\item{nsimEIM}{
@@ -63,6 +61,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
See \code{\link{CommonVGAMffArguments}} for more information
and the note below.
+
}
\item{cutoff}{
Used in the finite series approximation.
@@ -76,6 +75,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
It is like specifying \code{p} in an imaginary function
\code{qnegbin(p)}.
+
}
\item{Maxiter}{
Used in the finite series approximation.
@@ -84,6 +84,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
In theory, the value involves an infinite series.
If this argument is too small then the value may be inaccurate.
+
}
\item{deviance.arg}{
Logical. If \code{TRUE}, the deviance function is attached
@@ -94,6 +95,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
IRLS algorithm. It should be set \code{TRUE} only when
used with \code{\link{cqo}} under the fast algorithm.
+
}
\item{imethod}{
An integer with value \code{1} or \code{2} or \code{3} which
@@ -102,6 +104,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
and/or else specify a value for \code{shrinkage.init}
and/or else specify a value for \code{isize}.
+
}
\item{parallel}{
See \code{\link{CommonVGAMffArguments}} for more information.
@@ -112,6 +115,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
You should set \code{zero = NULL} too if \code{parallel = TRUE} to
avoid a conflict.
+
}
\item{shrinkage.init}{
How much shrinkage is used when initializing \eqn{\mu}{mu}.
@@ -121,6 +125,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
This argument is used in conjunction with \code{imethod}.
If convergence failure occurs try setting this argument to 1.
+
}
\item{zero}{
Integer valued vector, usually assigned \eqn{-2} or \eqn{2} if used
@@ -134,6 +139,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
are intercept-only.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
@@ -235,6 +241,7 @@ polya(lprob = "logit", lsize = "loge", eprob = list(), esize = list(),
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
Lawless, J. F. (1987)
@@ -403,13 +410,13 @@ coef(fit1, matrix = TRUE)
# Example 3: large counts so definitely use the nsimEIM argument
ndata <- transform(ndata, y3 = rnbinom(nn, mu = exp(12+x2), size = exp(1)))
-with(ndata, range(y3)) # Large counts
+with(ndata, range(y3)) # Large counts
fit2 <- vglm(y3 ~ x2, negbinomial(nsimEIM = 100), ndata, trace = TRUE)
coef(fit2, matrix = TRUE)
# Example 4: a NB-1 to estimate a negative binomial with Var(Y) = phi0 * mu
-nn <- 1000 # Number of observations
-phi0 <- 10 # Specify this; should be greater than unity
+nn <- 1000 # Number of observations
+phi0 <- 10 # Specify this; should be greater than unity
delta0 <- 1 / (phi0 - 1)
mydata <- data.frame(x2 = runif(nn), x3 = runif(nn))
mydata <- transform(mydata, mu = exp(2 + 3 * x2 + 0 * x3))
@@ -421,9 +428,10 @@ nb1 <- vglm(y3 ~ x2 + x3, negbinomial(parallel = TRUE, zero = NULL),
mydata, trace = TRUE)
# Extracting out some quantities:
cnb1 <- coef(nb1, matrix = TRUE)
-mydiff <- (cnb1["(Intercept)", "log(size)"] - cnb1["(Intercept)", "log(mu)"])
+mydiff <- (cnb1["(Intercept)", "log(size)"] -
+ cnb1["(Intercept)", "log(mu)"])
delta0.hat <- exp(mydiff)
-(phi.hat <- 1 + 1 / delta0.hat) # MLE of phi
+(phi.hat <- 1 + 1 / delta0.hat) # MLE of phi
summary(nb1)
# Obtain a 95 percent confidence interval for phi0:
myvec <- rbind(-1, 1, 0, 0)
diff --git a/man/negbinomial.size.Rd b/man/negbinomial.size.Rd
index c2caea0..5631d52 100644
--- a/man/negbinomial.size.Rd
+++ b/man/negbinomial.size.Rd
@@ -8,8 +8,8 @@
}
\usage{
-negbinomial.size(size = Inf, lmu = "loge", emu = list(), imu = NULL,
- quantile.probs = 0.75, imethod = 1,
+negbinomial.size(size = Inf, lmu = "loge", imu = NULL,
+ probs.y = 0.75, imethod = 1,
shrinkage.init = 0.95, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -24,12 +24,12 @@ negbinomial.size(size = Inf, lmu = "loge", emu = list(), imu = NULL,
}
- \item{lmu, emu, imu}{
+ \item{lmu, imu}{
Same as \code{\link{negbinomial}}.
}
- \item{quantile.probs}{
+ \item{probs.y}{
Same as \code{\link{negbinomial}}.
@@ -86,8 +86,12 @@ Cambridge: Cambridge University Press.
\author{ Thomas W. Yee }
\note{
If \code{lmu = "nbcanlink"} in \code{negbinomial.size()} then
- the \code{size} argument here is placed inside the \code{earg}
- argument of \code{nbcanlink()} as a matrix with conformable size.
+ the \code{size} argument here should be assigned.
+
+
+
+% is placed inside the \code{earg}
+% argument of \code{nbcanlink()} as a matrix with conformable size.
}
@@ -104,12 +108,12 @@ Cambridge: Cambridge University Press.
}
\examples{
# Simulated data with various multiple responses
-size1 = exp(1); size2 = exp(2); size3 = exp(0); size4 = Inf
+size1 <- exp(1); size2 <- exp(2); size3 <- exp(0); size4 <- Inf
ndata <- data.frame(x2 = runif(nn <- 1000))
ndata <- transform(ndata, eta1 = -1 - 2 * x2, # eta1 must be negative
size1 = size1)
ndata <- transform(ndata,
- mu1 = nbcanlink(eta1, earg = list(size = size1), inv = TRUE))
+ mu1 = nbcanlink(eta1, size = size1, inv = TRUE))
ndata <- transform(ndata,
y1 = rnbinom(nn, mu = mu1, size = size1), # NB-C
y2 = rnbinom(nn, mu = exp(2 - x2), size = size2),
diff --git a/man/normal1.Rd b/man/normal1.Rd
index d9057c7..af9ca19 100644
--- a/man/normal1.Rd
+++ b/man/normal1.Rd
@@ -9,7 +9,6 @@
}
\usage{
normal1(lmean = "identity", lsd = "loge", lvar = "loge",
- emean = list(), esd = list(), evar = list(),
var.arg = FALSE, imethod = 1, isd = NULL, parallel = FALSE,
intercept.apply = FALSE, zero = -2)
}
@@ -23,12 +22,15 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
}
- \item{emean, esd, evar}{
- List. Extra argument for the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{emean, esd, evar}{
+% List. Extra argument for the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% emean = list(), esd = list(), evar = list(),
+% }
+
+
\item{var.arg}{
Logical.
If \code{TRUE} then the second parameter is the variance and
@@ -76,8 +78,8 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
\author{ T. W. Yee }
\note{
- Yet to do: allow an argument such as \code{sameSD} that enables the
- standard devations to be the same.
+ Yet to do: allow an argument such as \code{eq.sd} that enables
+ the standard deviations to be the same.
}
@@ -85,6 +87,7 @@ normal1(lmean = "identity", lsd = "loge", lvar = "loge",
\code{\link{gaussianff}},
\code{\link{posnormal1}},
\code{\link{mix2normal1}},
+% \code{\link{normal1sum1}},
\code{\link{Qvar}},
\code{\link{tobit}},
\code{\link{cennormal1}},
@@ -114,9 +117,12 @@ coef(fit2, matrix = TRUE)
# Generate data from N(mu = theta = 10, sigma = theta) and estimate theta.
theta <- 10
ndata <- data.frame(y = rnorm(100, m = theta, sd = theta))
-fit <- vglm(y ~ 1, normal1(lsd = "identity"), ndata,
- constraints = list("(Intercept)" = rbind(1, 1)))
-coef(fit, matrix = TRUE)
+fit3 <- vglm(y ~ 1, normal1(lsd = "identity"), ndata,
+ constraints = list("(Intercept)" = rbind(1, 1)))
+fit4 <- vglm(y ~ 1, normal1(lsd = "identity", parallel = TRUE,
+ intercept.apply = TRUE, zero = NULL), ndata)
+coef(fit3, matrix = TRUE)
+coef(fit4, matrix = TRUE) # Same as fit3
}
\keyword{models}
\keyword{regression}
diff --git a/man/notdocumentedyet.Rd b/man/notdocumentedyet.Rd
index 9bce2aa..882a327 100644
--- a/man/notdocumentedyet.Rd
+++ b/man/notdocumentedyet.Rd
@@ -2,6 +2,26 @@
\alias{notdocumentedyet}
%
%
+%
+%
+% 20120813 New links (no earg)
+\alias{Dtheta.deta}
+\alias{D2theta.deta2}
+\alias{Eta2theta}
+\alias{Theta2eta}
+\alias{link2list}
+\alias{Namesof}
+%
+%
+%
+%
+% 20120514, 20120528,
+\alias{w.wz.merge}
+\alias{w.y.check}
+\alias{vweighted.mean.default}
+%
+% 20120418
+\alias{nvar_vlm}
% 20120310
%\alias{hatvalues}
%\alias{hatvalues.vlm}
@@ -120,8 +140,8 @@
%
%
%20101222; Alfian work
-%\alias{Rcam} % Has been written
-%\alias{plotrcam0} % Has been written
+%\alias{Rcim} % Has been written
+%\alias{plotrcim0} % Has been written
%\alias{moffset} % Has been written
% \alias{Qvar}
\alias{plotqvar}
@@ -196,7 +216,7 @@
% \alias{attrassign}
% \alias{attrassigndefault}
% \alias{attrassignlm}
-\alias{beta4}
+% \alias{beta4}
% \alias{betaffqn}
\alias{biplot}
\alias{biplot.qrrvglm}
@@ -230,7 +250,7 @@
\alias{coefvlm}
\alias{coefvsmooth.spline}
\alias{coefvsmooth.spline.fit}
-\alias{constraints.vlm}
+% \alias{constraints.vlm}
% \alias{cqo.fit}
\alias{d2theta.deta2}
% \alias{dcda.fast}
@@ -365,7 +385,7 @@
\alias{nvar.rrvglm}
\alias{nvar.qrrvglm}
\alias{nvar.cao}
-\alias{nvar.rcam}
+\alias{nvar.rcim}
\alias{ns}
% \alias{num.deriv.rrr}
\alias{persp}
@@ -464,7 +484,7 @@
\alias{summary.lms}
\alias{summary.qrrvglm}
\alias{summary.rc.exponential}
-\alias{summaryrcam}
+\alias{summaryrcim}
\alias{summary.rrvglm}
\alias{summary.uqo}
\alias{summaryvgam}
@@ -530,8 +550,8 @@
%
\alias{Coef.uqo-class}
\alias{cao-class}
-\alias{rcam0-class}
-\alias{rcam-class}
+\alias{rcim0-class}
+\alias{rcim-class}
\alias{grc-class}
\alias{qrrvglm-class}
\alias{summary.qrrvglm-class}
@@ -539,9 +559,9 @@
\alias{summary.vgam-class}
\alias{summary.vglm-class}
\alias{summary.vlm-class}
-%%% 20101216 \alias{summary.rcam-class}
-%\alias{summary.rcam-class}
-%\alias{summaryrcam-class}
+%%% 20101216 \alias{summary.rcim-class}
+%\alias{summary.rcim-class}
+%\alias{summaryrcim-class}
\alias{uqo-class}
\alias{vcov.qrrvglm-class}
\alias{vlm-class}
diff --git a/man/ordpoisson.Rd b/man/ordpoisson.Rd
index 7ff3acf..a6afba8 100644
--- a/man/ordpoisson.Rd
+++ b/man/ordpoisson.Rd
@@ -10,7 +10,7 @@
\usage{
ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
Levels = NULL, init.mu = NULL, parallel = FALSE,
- zero = NULL, link = "loge", earg = list())
+ zero = NULL, link = "loge")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -27,7 +27,7 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
\code{2}, \ldots, \code{L}, say, where \code{L} is the number of
levels. Such input can be generated with \code{\link[base]{cut}}
with argument \code{labels = FALSE}. If \code{countdata = TRUE} then
- the response is expected to be in the same format as \code{fit at y}
+ the response is expected to be in the same format as \code{depvar(fit)}
where \code{fit} is a fitted model with \code{ordpoisson} as the
\pkg{VGAM} family function. That is, the response is matrix of counts
with \code{L} columns (if \code{NOS = 1}).
@@ -54,7 +54,7 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
default is to compute an initial value internally).
}
- \item{parallel, zero, link, earg}{
+ \item{parallel, zero, link}{
See \code{\link{poissonff}}.
}
@@ -115,7 +115,7 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
\section{Warning }{
The input requires care as little to no checking is done.
If \code{fit} is the fitted object, have a look at \code{fit at extra} and
- \code{fit at y} to check.
+ \code{depvar(fit)} to check.
}
@@ -128,38 +128,38 @@ ordpoisson(cutpoints, countdata = FALSE, NOS = NULL,
}
\examples{
-set.seed(123) # Example 1
-x2 = runif(n <- 1000); x3 = runif(n)
-mymu = exp(3 - 1 * x2 + 2 * x3)
-y1 = rpois(n, lambda = mymu)
-cutpts = c(-Inf, 20, 30, Inf)
-fcutpts = cutpts[is.finite(cutpts)] # finite cutpoints
-ystar = cut(y1, breaks = cutpts, labels = FALSE)
+set.seed(123) # Example 1
+x2 <- runif(n <- 1000); x3 <- runif(n)
+mymu <- exp(3 - 1 * x2 + 2 * x3)
+y1 <- rpois(n, lambda = mymu)
+cutpts <- c(-Inf, 20, 30, Inf)
+fcutpts <- cutpts[is.finite(cutpts)] # finite cutpoints
+ystar <- cut(y1, breaks = cutpts, labels = FALSE)
\dontrun{
plot(x2, x3, col = ystar, pch = as.character(ystar))
}
table(ystar) / sum(table(ystar))
-fit = vglm(ystar ~ x2 + x3, fam = ordpoisson(cutpoi = fcutpts))
-head(fit at y) # This can be input if countdata = TRUE
+fit <- vglm(ystar ~ x2 + x3, fam = ordpoisson(cutpoi = fcutpts))
+head(depvar(fit)) # This can be input if countdata = TRUE
head(fitted(fit))
head(predict(fit))
coef(fit, matrix = TRUE)
fit at extra
# Example 2: multivariate and there are no obsns between some cutpoints
-cutpts2 = c(-Inf, 0, 9, 10, 20, 70, 200, 201, Inf)
-fcutpts2 = cutpts2[is.finite(cutpts2)] # finite cutpoints
-y2 = rpois(n, lambda = mymu) # Same model as y1
-ystar2 = cut(y2, breaks = cutpts2, labels = FALSE)
+cutpts2 <- c(-Inf, 0, 9, 10, 20, 70, 200, 201, Inf)
+fcutpts2 <- cutpts2[is.finite(cutpts2)] # finite cutpoints
+y2 <- rpois(n, lambda = mymu) # Same model as y1
+ystar2 <- cut(y2, breaks = cutpts2, labels = FALSE)
table(ystar2) / sum(table(ystar2))
-fit = vglm(cbind(ystar,ystar2) ~ x2 + x3, fam =
- ordpoisson(cutpoi = c(fcutpts,Inf,fcutpts2,Inf),
- Levels = c(length(fcutpts)+1,length(fcutpts2)+1),
- parallel = TRUE), trace = TRUE)
+fit <- vglm(cbind(ystar,ystar2) ~ x2 + x3, fam =
+ ordpoisson(cutpoi = c(fcutpts,Inf,fcutpts2,Inf),
+ Levels = c(length(fcutpts)+1,length(fcutpts2)+1),
+ parallel = TRUE), trace = TRUE)
coef(fit, matrix = TRUE)
fit at extra
constraints(fit)
-summary(fit at y) # Some columns have all zeros
+summary(depvar(fit)) # Some columns have all zeros
}
\keyword{math}
\keyword{models}
diff --git a/man/oxtemp.Rd b/man/oxtemp.Rd
index c59c342..74439d4 100644
--- a/man/oxtemp.Rd
+++ b/man/oxtemp.Rd
@@ -27,7 +27,7 @@
% \references{
% }
\examples{
-fit = vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE)
+fit <- vglm(maxtemp ~ 1, egev, data = oxtemp, trace = TRUE)
}
\keyword{datasets}
diff --git a/man/paralogistic.Rd b/man/paralogistic.Rd
index 42178bd..c9681f6 100644
--- a/man/paralogistic.Rd
+++ b/man/paralogistic.Rd
@@ -7,8 +7,8 @@
paralogistic distribution.
}
\usage{
-paralogistic(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
- escale = list(), ishape1.a = 2, iscale = NULL, zero = NULL)
+paralogistic(lshape1.a = "loge", lscale = "loge",
+ ishape1.a = 2, iscale = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +19,6 @@ paralogistic(lshape1.a = "loge", lscale = "loge", eshape1.a = list(),
See \code{\link{Links}} for more choices.
}
- \item{eshape1.a, escale}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1.a, iscale}{
Optional initial values for \code{a} and \code{scale}.
@@ -93,10 +88,10 @@ Hoboken, NJ, USA: Wiley-Interscience.
}
\examples{
-pdata = data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
-fit = vglm(y ~ 1, paralogistic, pdata, trace = TRUE)
-fit = vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 7),
- pdata, trace = TRUE, epsilon = 1e-8)
+pdata <- data.frame(y = rparalogistic(n = 3000, exp(1), exp(2)))
+fit <- vglm(y ~ 1, paralogistic, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.3, iscale = 7),
+ pdata, trace = TRUE, epsilon = 1e-8)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/paralogisticUC.Rd b/man/paralogisticUC.Rd
index 03d41cb..593741c 100644
--- a/man/paralogisticUC.Rd
+++ b/man/paralogisticUC.Rd
@@ -9,6 +9,7 @@
Density, distribution function, quantile function and random
generation for the paralogistic distribution with shape parameter \code{a}
and scale parameter \code{scale}.
+
}
\usage{
dparalogistic(x, shape1.a, scale = 1, log = FALSE)
@@ -44,6 +45,7 @@ Kleiber, C. and Kotz, S. (2003)
Actuarial Sciences},
Hoboken, NJ, USA: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
@@ -62,10 +64,11 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{paralogistic}},
\code{\link{genbetaII}}.
+
}
\examples{
-pdata = data.frame(y = rparalogistic(n = 3000, 4, 6))
-fit = vglm(y ~ 1, paralogistic(ishape1.a = 2.1), pdata, trace = TRUE)
+pdata <- data.frame(y = rparalogistic(n = 3000, 4, 6))
+fit <- vglm(y ~ 1, paralogistic(ishape1.a = 2.1), pdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/pareto1.Rd b/man/pareto1.Rd
index a237227..2d16db3 100644
--- a/man/pareto1.Rd
+++ b/man/pareto1.Rd
@@ -10,9 +10,8 @@
}
\usage{
-pareto1(lshape = "loge", earg = list(), location = NULL)
-tpareto1(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
- imethod = 1)
+ pareto1(lshape = "loge", location = NULL)
+tpareto1(lower, upper, lshape = "loge", ishape = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,12 +22,6 @@ tpareto1(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
-
- }
\item{lower, upper}{
Numeric.
Lower and upper limits for the truncated Pareto distribution.
@@ -98,6 +91,7 @@ tpareto1(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
[(1-k)(1-(\alpha/U)^k)]}{
k * lower^k * (U^(1-k)-alpha^(1-k)) / ((1-k) * (1-(alpha/U)^k))}.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -172,27 +166,27 @@ tpareto1(lower, upper, lshape = "loge", earg = list(), ishape = NULL,
}
\examples{
-alpha = 2; kay = exp(3)
-pdat = data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
-fit = vglm(y ~ 1, pareto1, pdat, trace = TRUE)
-fit at extra # The estimate of alpha is here
+alpha <- 2; kay <- exp(3)
+pdat <- data.frame(y = rpareto(n = 1000, location = alpha, shape = kay))
+fit <- vglm(y ~ 1, pareto1, pdat, trace = TRUE)
+fit at extra # The estimate of alpha is here
head(fitted(fit))
with(pdat, mean(y))
coef(fit, matrix = TRUE)
-summary(fit) # Standard errors are incorrect!!
+summary(fit) # Standard errors are incorrect!!
# Here, alpha is assumed known
-fit2 = vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE, crit = "coef")
-fit2 at extra # alpha stored here
+fit2 <- vglm(y ~ 1, pareto1(location = alpha), pdat, trace = TRUE, crit = "coef")
+fit2 at extra # alpha stored here
head(fitted(fit2))
coef(fit2, matrix = TRUE)
-summary(fit2) # Standard errors are okay
+summary(fit2) # Standard errors are okay
# Upper truncated Pareto distribution
-lower = 2; upper = 8; kay = exp(2)
-pdat3 = data.frame(y = rtpareto(n = 100, lower = lower,
+lower <- 2; upper <- 8; kay <- exp(2)
+pdat3 <- data.frame(y = rtpareto(n = 100, lower = lower,
upper = upper, shape = kay))
-fit3 = vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE, cri = "coef")
+fit3 <- vglm(y ~ 1, tpareto1(lower, upper), pdat3, trace = TRUE, cri = "coef")
coef(fit3, matrix = TRUE)
c(fit3 at misc$lower, fit3 at misc$upper)
}
diff --git a/man/paretoIV.Rd b/man/paretoIV.Rd
index 4603b5c..05d37f7 100644
--- a/man/paretoIV.Rd
+++ b/man/paretoIV.Rd
@@ -12,13 +12,10 @@
}
\usage{
paretoIV(location = 0, lscale = "loge", linequality = "loge", lshape = "loge",
- escale = list(), einequality = list(), eshape = list(),
iscale = 1, iinequality = 1, ishape = NULL, imethod = 1)
paretoIII(location = 0, lscale = "loge", linequality = "loge",
- escale = list(), einequality = list(),
iscale = NULL, iinequality = NULL)
paretoII(location = 0, lscale = "loge", lshape = "loge",
- escale = list(), eshape = list(),
iscale = NULL, ishape = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -38,11 +35,6 @@ paretoII(location = 0, lscale = "loge", lshape = "loge",
positive.
}
- \item{escale, einequality, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iscale, iinequality, ishape}{
Initial values for the parameters.
A \code{NULL} value means that it is obtained internally.
@@ -165,10 +157,10 @@ Fairland, Maryland: International Cooperative Publishing House.
}
\examples{
-pdata = data.frame(y = rparetoIV(2000, scal = exp(1),
- ineq = exp(-0.3), shape = exp(1)))
-\dontrun{par(mfrow = c(2,1)); with(pdata, hist(y)); with(pdata, hist(log(y))) }
-fit = vglm(y ~ 1, paretoIV, pdata, trace = TRUE)
+pdata <- data.frame(y = rparetoIV(2000, scale = exp(1),
+ ineq = exp(-0.3), shape = exp(1)))
+\dontrun{par(mfrow = c(2, 1)); with(pdata, hist(y)); with(pdata, hist(log(y))) }
+fit <- vglm(y ~ 1, paretoIV, pdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/paretoIVUC.Rd b/man/paretoIVUC.Rd
index 20cab72..141f05e 100644
--- a/man/paretoIVUC.Rd
+++ b/man/paretoIVUC.Rd
@@ -26,22 +26,22 @@
}
\usage{
-dparetoIV(x, location=0, scale=1, inequality=1, shape=1, log=FALSE)
-pparetoIV(q, location=0, scale=1, inequality=1, shape=1)
-qparetoIV(p, location=0, scale=1, inequality=1, shape=1)
-rparetoIV(n, location=0, scale=1, inequality=1, shape=1)
-dparetoIII(x, location=0, scale=1, inequality=1, log=FALSE)
-pparetoIII(q, location=0, scale=1, inequality=1)
-qparetoIII(p, location=0, scale=1, inequality=1)
-rparetoIII(n, location=0, scale=1, inequality=1)
-dparetoII(x, location=0, scale=1, shape=1, log=FALSE)
-pparetoII(q, location=0, scale=1, shape=1)
-qparetoII(p, location=0, scale=1, shape=1)
-rparetoII(n, location=0, scale=1, shape=1)
-dparetoI(x, scale=1, shape=1)
-pparetoI(q, scale=1, shape=1)
-qparetoI(p, scale=1, shape=1)
-rparetoI(n, scale=1, shape=1)
+dparetoIV(x, location = 0, scale = 1, inequality = 1, shape = 1, log = FALSE)
+pparetoIV(q, location = 0, scale = 1, inequality = 1, shape = 1)
+qparetoIV(p, location = 0, scale = 1, inequality = 1, shape = 1)
+rparetoIV(n, location = 0, scale = 1, inequality = 1, shape = 1)
+dparetoIII(x, location = 0, scale = 1, inequality = 1, log = FALSE)
+pparetoIII(q, location = 0, scale = 1, inequality = 1)
+qparetoIII(p, location = 0, scale = 1, inequality = 1)
+rparetoIII(n, location = 0, scale = 1, inequality = 1)
+dparetoII(x, location = 0, scale = 1, shape = 1, log = FALSE)
+pparetoII(q, location = 0, scale = 1, shape = 1)
+qparetoII(p, location = 0, scale = 1, shape = 1)
+rparetoII(n, location = 0, scale = 1, shape = 1)
+dparetoI(x, scale = 1, shape = 1)
+pparetoI(q, scale = 1, shape = 1)
+qparetoI(p, scale = 1, shape = 1)
+rparetoI(n, scale = 1, shape = 1)
}
\arguments{
\item{x, q}{vector of quantiles. }
@@ -52,7 +52,7 @@ rparetoI(n, scale=1, shape=1)
inequality and shape parameters. }
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -80,29 +80,33 @@ Fairland, Maryland: International Cooperative Publishing House.
For the formulas and other details
see \code{\link{paretoIV}}.
+
}
\note{
The functions \code{[dpqr]paretoI} are the same as \code{[dpqr]pareto1}
except for a slight change in notation: \eqn{s=k} and
\eqn{b=\alpha}{b=alpha}; see \code{\link{Pareto}}.
+
}
\seealso{
\code{\link{paretoIV}},
\code{\link{Pareto}}.
+
+
}
\examples{
\dontrun{
-x = seq(-0.2, 4, by=0.01)
-loc = 0; Scale = 1; ineq = 1; shape = 1.0;
-plot(x, dparetoIV(x, loc, Scale, ineq, shape), type="l", col="blue",
- main="Blue is density, red is cumulative distribution function",
- sub="Purple are 5,10,...,95 percentiles", ylim=0:1, las=1, ylab="")
-abline(h=0, col="blue", lty=2)
-Q = qparetoIV(seq(0.05,0.95,by=0.05), loc, Scale, ineq, shape)
-lines(Q, dparetoIV(Q, loc, Scale, ineq, shape), col="purple", lty=3, type="h")
-lines(x, pparetoIV(x, loc, Scale, ineq, shape), col="red")
-abline(h=0, lty=2)
+x <- seq(-0.2, 4, by = 0.01)
+loc <- 0; Scale <- 1; ineq <- 1; shape <- 1.0;
+plot(x, dparetoIV(x, loc, Scale, ineq, shape), type = "l", col = "blue",
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple are 5,10,...,95 percentiles", ylim = 0:1, las = 1, ylab = "")
+abline(h = 0, col = "blue", lty = 2)
+Q <- qparetoIV(seq(0.05,0.95,by = 0.05), loc, Scale, ineq, shape)
+lines(Q, dparetoIV(Q, loc, Scale, ineq, shape), col = "purple", lty = 3, type = "h")
+lines(x, pparetoIV(x, loc, Scale, ineq, shape), col = "orange")
+abline(h = 0, lty = 2)
}
}
\keyword{distribution}
diff --git a/man/perks.Rd b/man/perks.Rd
new file mode 100644
index 0000000..8e77348
--- /dev/null
+++ b/man/perks.Rd
@@ -0,0 +1,143 @@
+\name{perks}
+\alias{perks}
+%- Also NEED an '\alias' for EACH other topic documented here.
+\title{ Perks Distribution Family Function }
+\description{
+ Maximum likelihood estimation of the 2-parameter
+ Perks distribution.
+
+}
+\usage{
+perks(lshape = "loge", lscale = "loge",
+ ishape = NULL, iscale = NULL,
+ nsimEIM = 500, oim.mean = FALSE, zero = NULL)
+}
+%- maybe also 'usage' for other objects documented here.
+\arguments{
+ \item{lshape, lscale}{
+ Parameter link functions applied to the
+ shape parameter \code{shape},
+ scale parameter \code{scale}.
+ All parameters are treated as positive here.
+ See \code{\link{Links}} for more choices.
+
+
+ }
+
+% \item{eshape, escale}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+
+ \item{ishape, iscale}{
+ Optional initial values.
+ A \code{NULL} means a value is computed internally.
+
+
+ }
+ \item{nsimEIM, zero}{
+ See \code{\link{CommonVGAMffArguments}}.
+
+ }
+ \item{oim.mean}{
+ Currently ignored.
+
+ }
+}
+\details{
+The Perks distribution
+has cumulative distribution function
+\deqn{F(x; \alpha, \beta) =
+1 -
+\left\{
+\frac{1 + \alpha}{1 + \alpha e^{\beta x}}
+\right\}^{1 / \beta}
+}{%
+F(x; alpha, beta) = 1 - ((1 + alpha)/(1 + alpha * e^(beta * x)))^(1 / beta)
+}
+which leads to a probability density function
+\deqn{f(x; \alpha, \beta) =
+\left[ 1 + \alpha \right]^{1 / \beta}
+\alpha e^{\beta x} / (1 + \alpha e^{\beta x})^{1 + 1 / \beta}
+}{%
+f(x; alpha, beta) = [1 + alpha]^(1 / beta) * alpha * exp(beta * x) / (1 + alpha * exp(beta * x))^(1 + 1 / beta)
+}
+for \eqn{\alpha > 0}{alpha > 0},
+\eqn{\beta > 0}{beta > 0},
+\eqn{x > 0}.
+Here, \eqn{\beta}{beta} is called the scale parameter \code{scale},
+and \eqn{\alpha}{alpha} is called a shape parameter.
+The moments for this distribution do
+not appear to be available in closed form.
+
+
+Simulated Fisher scoring is used and multiple responses are handled.
+
+
+}
+\value{
+ An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
+ The object is used by modelling functions such as \code{\link{vglm}},
+ and \code{\link{vgam}}.
+
+
+}
+\references{
+
+Perks, W. (1932)
+On some experiments in the graduation of mortality statistics.
+\emph{Journal of the Institute of Actuaries},
+\bold{63}, 12--40.
+
+
+
+Richards, S. J. (2012)
+A handbook of parametric survival models for actuarial use.
+\emph{Scandinavian Actuarial Journal}.
+1--25.
+
+
+}
+
+\author{ T. W. Yee }
+\section{Warning }{
+A lot of care is needed because
+this is a rather difficult distribution for parameter estimation.
+If the self-starting initial values fail then try experimenting
+with the initial value arguments, especially \code{iscale}.
+Successful convergence depends on having very good initial values.
+Also, monitor convergence by setting \code{trace = TRUE}.
+
+
+}
+
+\seealso{
+ \code{\link{dperks}}.
+
+
+}
+
+\examples{
+\dontrun{ set.seed(123)
+pdata <- data.frame(x2 = runif(nn <- 1000)) # x2 unused
+pdata <- transform(pdata, eta1 = -1,
+ ceta1 = 1)
+pdata <- transform(pdata, shape1 = exp(eta1),
+ scale1 = exp(ceta1))
+pdata <- transform(pdata,
+ y1 = rperks(nn, shape = shape1, scale = scale1))
+
+fit1 <- vglm(y1 ~ 1, perks, data = pdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+summary(fit1)
+}
+}
+\keyword{models}
+\keyword{regression}
+
+
+%# fit1 <- vglm(y1 ~ 1, perks, data = pdata, trace = TRUE)
+%# fit2 <- vglm(y1 ~ 1, perks(imeth = 2), data = pdata, trace = TRUE)
+% Argument \code{probs.y} is used only when \code{imethod = 2}.
+
diff --git a/man/perksUC.Rd b/man/perksUC.Rd
new file mode 100644
index 0000000..5deee4a
--- /dev/null
+++ b/man/perksUC.Rd
@@ -0,0 +1,76 @@
+\name{Perks}
+\alias{Perks}
+\alias{dperks}
+\alias{pperks}
+\alias{qperks}
+\alias{rperks}
+\title{The Perks Distribution}
+\description{
+ Density, cumulative distribution function,
+ quantile function
+ and
+ random generation for
+ the Perks distribution.
+
+}
+\usage{
+dperks(x, shape, scale = 1, log = FALSE)
+pperks(q, shape, scale = 1)
+qperks(p, shape, scale = 1)
+rperks(n, shape, scale = 1)
+}
+\arguments{
+ \item{x, q}{vector of quantiles.}
+ \item{p}{vector of probabilities.}
+ \item{n}{number of observations. }
+ \item{log}{
+ Logical.
+ If \code{log = TRUE} then the logarithm of the density is returned.
+
+ }
+ \item{shape, scale}{positive shape and scale parameters. }
+
+}
+\value{
+ \code{dperks} gives the density,
+ \code{pperks} gives the cumulative distribution function,
+ \code{qperks} gives the quantile function, and
+ \code{rperks} generates random deviates.
+
+
+}
+\author{ T. W. Yee }
+\details{
+ See \code{\link{perks}} for details.
+
+}
+%\note{
+%
+%}
+\seealso{
+ \code{\link{perks}}.
+
+
+}
+\examples{
+probs <- seq(0.01, 0.99, by = 0.01)
+Shape <- exp(-1.0); Scale <- exp(1);
+max(abs(pperks(qperks(p = probs, Shape, Scale),
+ Shape, Scale) - probs)) # Should be 0
+
+\dontrun{ x <- seq(-0.1, 7, by = 0.01);
+plot(x, dperks(x, Shape, Scale), type = "l", col = "blue", las = 1,
+ main = "Blue is density, orange is cumulative distribution function",
+ sub = "Purple lines are the 10,20,...,90 percentiles",
+ ylab = "", ylim = 0:1)
+abline(h = 0, col = "blue", lty = 2)
+lines(x, pperks(x, Shape, Scale), col = "orange")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qperks(probs, Shape, Scale)
+lines(Q, dperks(Q, Shape, Scale), col = "purple", lty = 3, type = "h")
+pperks(Q, Shape, Scale) - probs # Should be all zero
+abline(h = probs, col = "purple", lty = 3) }
+}
+\keyword{distribution}
+
+
diff --git a/man/persp.qrrvglm.Rd b/man/persp.qrrvglm.Rd
index ffc8493..9da7d8f 100644
--- a/man/persp.qrrvglm.Rd
+++ b/man/persp.qrrvglm.Rd
@@ -191,18 +191,18 @@ canonical Gaussian ordination.
\code{\link[graphics]{title}}.
}
\examples{\dontrun{
-hspider[,1:6] = scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
set.seed(111)
-r1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- poissonff, hspider, trace = FALSE, ITolerances = TRUE)
+r1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, hspider, trace = FALSE, ITolerances = TRUE)
set.seed(111) # r2 below is an ill-conditioned model
-r2 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- isdlv = c(2.4,1.0), Muxfactor = 3.0, trace = FALSE,
- poissonff, hspider, Rank = 2, EqualTolerances = TRUE)
+r2 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardmont, Pardnigr, Pardpull, Trocterr) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ isdlv = c(2.4,1.0), Muxfactor = 3.0, trace = FALSE,
+ poissonff, hspider, Rank = 2, EqualTolerances = TRUE)
sort(r1 at misc$deviance.Bestof) # A history of the fits
sort(r2 at misc$deviance.Bestof) # A history of the fits
diff --git a/man/plackUC.Rd b/man/plackUC.Rd
index a146720..8def019 100644
--- a/man/plackUC.Rd
+++ b/man/plackUC.Rd
@@ -60,12 +60,12 @@ Some contributions to contingency-type distributions.
}
\examples{
-\dontrun{ N = 101; oratio = exp(1)
-x = seq(0.0, 1.0, len = N)
-ox = expand.grid(x, x)
-z = dplack(ox[,1], ox[,2], oratio = oratio)
+\dontrun{ N <- 101; oratio <- exp(1)
+x <- seq(0.0, 1.0, len = N)
+ox <- expand.grid(x, x)
+z <- dplack(ox[,1], ox[,2], oratio = oratio)
contour(x, x, matrix(z, N, N), col = "blue")
-z = pplack(ox[,1], ox[,2], oratio = oratio)
+z <- pplack(ox[,1], ox[,2], oratio = oratio)
contour(x, x, matrix(z, N, N), col = "blue")
plot(rr <- rplack(n = 3000, oratio = oratio))
diff --git a/man/plackett.Rd b/man/plackett.Rd
index b1b4f6a..1a74248 100644
--- a/man/plackett.Rd
+++ b/man/plackett.Rd
@@ -8,14 +8,12 @@
}
\usage{
-plackett(link = "loge", earg = list(), ioratio = NULL,
- imethod = 1, nsimEIM = 200)
+plackett(link = "loge", ioratio = NULL, imethod = 1, nsimEIM = 200)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument applied to the (positive) odds ratio
- \eqn{\psi}{psi}.
+ \item{link}{
+ Link function applied to the (positive) odds ratio \eqn{\psi}{psi}.
See \code{\link{Links}} for more choices
and information.
@@ -100,9 +98,9 @@ A class of bivariate distributions.
}
\examples{
-ymat = rplack(n = 2000, oratio = exp(2))
+ymat <- rplack(n = 2000, oratio = exp(2))
\dontrun{plot(ymat, col = "blue")}
-fit = vglm(ymat ~ 1, fam = plackett, trace = TRUE)
+fit <- vglm(ymat ~ 1, fam = plackett, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
vcov(fit)
diff --git a/man/plotdeplot.lmscreg.Rd b/man/plotdeplot.lmscreg.Rd
index 9aec090..ea2b202 100644
--- a/man/plotdeplot.lmscreg.Rd
+++ b/man/plotdeplot.lmscreg.Rd
@@ -104,7 +104,7 @@ contains further information and examples.
}
\examples{
-fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bmi.nz)
+fit <- vgam(BMI ~ s(age, df = c(4,2)), lms.bcn(zero = 1), bmi.nz)
\dontrun{ y = seq(15, 43, by = 0.25)
deplot(fit, x0 = 20, y = y, xlab = "BMI", col = "green", llwd = 2,
main = "BMI distribution at ages 20 (green), 40 (blue), 60 (orange)")
diff --git a/man/plotqrrvglm.Rd b/man/plotqrrvglm.Rd
index 3b4d80e..c00b230 100644
--- a/man/plotqrrvglm.Rd
+++ b/man/plotqrrvglm.Rd
@@ -6,8 +6,7 @@
The residuals of a QRR-VGLM are plotted for model diagnostic purposes.
}
\usage{
-plotqrrvglm(object,
- rtype = c("pearson", "response", "deviance", "working"),
+plotqrrvglm(object, rtype = c("response", "pearson", "deviance", "working"),
ask = FALSE,
main = paste(Rtype, "residuals vs latent variable(s)"),
xlab = "Latent Variable",
@@ -16,14 +15,14 @@ plotqrrvglm(object,
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{ An object of class \code{"qrrvglm"}. }
- \item{rtype}{ Character string giving residual type. By default, the first
- one is chosen. }
+ \item{rtype}{ Character string giving residual type.
+ By default, the first one is chosen. }
\item{ask}{ Logical. If \code{TRUE}, the user is asked to hit the return
key for the next plot. }
\item{main}{ Character string giving the title of the plot. }
\item{xlab}{ Character string giving the x-axis caption. }
\item{ITolerances}{ Logical. This argument is fed into
- \code{Coef(object, ITolerances=ITolerances)}.
+ \code{Coef(object, ITolerances = ITolerances)}.
}
\item{\dots}{ Other plotting arguments (see \code{\link[graphics]{par}}). }
}
@@ -31,10 +30,12 @@ plotqrrvglm(object,
Plotting the residuals can be potentially very useful for checking
that the model fit is adequate.
+
}
\value{
The original object.
+
}
\references{
@@ -59,20 +60,21 @@ canonical Gaussian ordination.
\code{\link{lvplot.qrrvglm}},
\code{\link{cqo}}.
+
}
\examples{\dontrun{
# QRR-VGLM on the hunting spiders data
# This is computationally expensive
-set.seed(111) # This leads to the global solution
+set.seed(111) # This leads to the global solution
# hspider[,1:6]=scale(hspider[,1:6]) # Standardize the environmental variables
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = quasipoissonff, data = hspider, Crow1positive = FALSE)
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ quasipoissonff, data = hspider, Crow1positive = FALSE)
par(mfrow = c(3, 4))
-plot(p1, rtype = "d", col = "blue", pch = 4, las = 1)
+plot(p1, rtype = "response", col = "blue", pch = 4, las = 1, main = "")
}
}
\keyword{dplot}
diff --git a/man/plotqtplot.lmscreg.Rd b/man/plotqtplot.lmscreg.Rd
index e0fddbe..e5d3734 100644
--- a/man/plotqtplot.lmscreg.Rd
+++ b/man/plotqtplot.lmscreg.Rd
@@ -98,7 +98,7 @@ contains further information and examples.
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df = c(4,2)), fam = lms.bcn(zero = 1), data = bmi.nz)
+fit <- vgam(BMI ~ s(age, df = c(4,2)), lms.bcn(zero = 1), data = bmi.nz)
qtplot(fit)
qtplot(fit, perc = c(25,50,75,95), lcol = "blue", tcol = "blue", llwd = 2)
}
diff --git a/man/plotrcam0.Rd b/man/plotrcim0.Rd
similarity index 86%
rename from man/plotrcam0.Rd
rename to man/plotrcim0.Rd
index 860aad9..cd71d82 100644
--- a/man/plotrcam0.Rd
+++ b/man/plotrcim0.Rd
@@ -1,17 +1,17 @@
-\name{plotrcam0}
-\alias{plotrcam0}
+\name{plotrcim0}
+\alias{plotrcim0}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
- Main effects plot for a Row-Column Association Model (RCAM)
+ Main effects plot for a Row-Column Interaction Model (RCIM)
}
\description{
- Produces a main effects plot for Row-Column Association
- Models (RCAMs).
+ Produces a main effects plot for Row-Column Interaction
+ Models (RCIMs).
}
\usage{
- plotrcam0(object, centered = TRUE, whichplots = c(1, 2),
+ plotrcim0(object, centered = TRUE, whichplots = c(1, 2),
hline0 = TRUE, hlty = "dashed", hcol = par()$col, hlwd = par()$lwd,
rfirst = 1, cfirst = 1,
rtype = "h", ctype = "h",
@@ -26,7 +26,7 @@
}
\arguments{
\item{object}{
- An \code{\link{rcam}} object.
+ An \code{\link{rcim}} object.
This should be of rank-0, i.e., main effects only and no
interactions.
@@ -130,7 +130,7 @@
}
\details{
- This function plots the row and column effects of a rank-0 RCAM.
+ This function plots the row and column effects of a rank-0 RCIM.
As the result is a main effects plot of a regression analysis, its
interpretation when \code{centered = FALSE} is relative
to the baseline (reference level) of a row and column, and
@@ -151,7 +151,7 @@
\note{
- This function should be only used to plot the object of rank-0 RCAM.
+ This function should be only used to plot the object of rank-0 RCIM.
If the rank is positive then it will issue a warning.
@@ -179,13 +179,13 @@
\seealso{
\code{\link{moffset}}
- \code{\link{Rcam}},
- \code{\link{rcam}}.
+ \code{\link{Rcim}},
+ \code{\link{rcim}}.
}
\examples{
-alcoff.e <- moffset(alcoff, "6", "Monday", postfix = "*") # Effective day
-fit0 <- rcam(alcoff.e, family = poissonff)
+alcoff.e <- moffset(alcoff, "6", "Mon", postfix = "*") # Effective day
+fit0 <- rcim(alcoff.e, family = poissonff)
\dontrun{par(oma = c(0, 0, 4, 0), mfrow = 1:2) # For all plots below too
ii = plot(fit0, rcol = "blue", ccol = "orange",
lwd = 4, ylim = c(-2, 2), # A common ylim
@@ -195,22 +195,22 @@ ii at post # Endowed with additional information
}
# Negative binomial example
-fit1 <- rcam(alcoff.e, negbinomial, trace = TRUE)
+fit1 <- rcim(alcoff.e, negbinomial, trace = TRUE)
\dontrun{ plot(fit1, ylim = c(-2, 2)) }
# Univariate normal example
-fit2 <- rcam(alcoff.e, normal1, trace = TRUE)
+fit2 <- rcim(alcoff.e, normal1, trace = TRUE)
\dontrun{ plot(fit2, ylim = c(-200, 400)) }
# Median-polish example
-fit3 <- rcam(alcoff.e, alaplace2(tau = 0.5, intparloc = TRUE),
+fit3 <- rcim(alcoff.e, alaplace2(tau = 0.5, intparloc = TRUE),
trace = TRUE)
\dontrun{ plot(fit3, ylim = c(-200, 250)) }
# Zero-inflated Poisson example on "crashp" (no 0s in alcoff)
cbind(rowSums(crashp)) # Easy to see the data
cbind(colSums(crashp)) # Easy to see the data
-fit4 <- rcam(Rcam(crashp, rbaseline = "5", cbaseline = "Sunday"),
+fit4 <- rcim(Rcim(crashp, rbaseline = "5", cbaseline = "Sun"),
zipoissonff, trace = TRUE)
\dontrun{ plot(fit4, ylim = c(-3, 3)) }
}
diff --git a/man/poissonff.Rd b/man/poissonff.Rd
index 9376183..77aad99 100644
--- a/man/poissonff.Rd
+++ b/man/poissonff.Rd
@@ -10,13 +10,14 @@
}
\usage{
-poissonff(link = "loge", earg=list(), dispersion = 1, onedpar = FALSE,
- imu = NULL, imethod = 1, parallel = FALSE, zero = NULL)
+poissonff(link = "loge", dispersion = 1, onedpar = FALSE, imu = NULL,
+ imethod = 1, parallel = FALSE, zero = NULL, bred = FALSE,
+ earg.link = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument applied to the mean or means.
+ \item{link}{
+ Link function applied to the mean or means.
See \code{\link{Links}} for more choices
and information.
@@ -51,6 +52,13 @@ poissonff(link = "loge", earg=list(), dispersion = 1, onedpar = FALSE,
are modelled as intercepts only. The values must be from the set
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of the
matrix response.
+ See \code{\link{CommonVGAMffArguments}} for more information.
+
+
+ }
+ \item{bred, earg.link}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
}
}
@@ -151,25 +159,25 @@ poissonff(link = "loge", earg=list(), dispersion = 1, onedpar = FALSE,
\examples{
poissonff()
-pdat = data.frame(x = rnorm(nn <- 100))
-pdat = transform(pdat, y = rpois(nn, exp(1+x)))
-(fit = vglm(y ~ x, family = poissonff, pdat))
+pdata <- data.frame(x = rnorm(nn <- 100))
+pdata <- transform(pdata, y = rpois(nn, exp(1+x)))
+(fit <- vglm(y ~ x, family = poissonff, pdata))
coef(fit, matrix = TRUE)
-nn = 200
-cdat = data.frame(x2 = rnorm(nn), x3 = rnorm(nn), x4 = rnorm(nn))
-cdat = transform(cdat, lv1 = 0 + x3 - 2*x4)
-cdat = transform(cdat, lambda1 = exp(3 - 0.5 * (lv1-0)^2),
- lambda2 = exp(2 - 0.5 * (lv1-1)^2),
- lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2))
-cdat = transform(cdat, y1 = rpois(nn, lambda1),
- y2 = rpois(nn, lambda2),
- y3 = rpois(nn, lambda3))
-# vvv p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, cdat,
-# vvv EqualTol = FALSE, ITol = FALSE)
-# vvv summary(p1) # # Three dispersion parameters are all unity
+nn <- 200
+cdata <- data.frame(x2 = rnorm(nn), x3 = rnorm(nn), x4 = rnorm(nn))
+cdata <- transform(cdata, lv1 = 0 + x3 - 2*x4)
+cdata <- transform(cdata, lambda1 = exp(3 - 0.5 * (lv1-0)^2),
+ lambda2 = exp(2 - 0.5 * (lv1-1)^2),
+ lambda3 = exp(2 - 0.5 * ((lv1+4)/2)^2))
+cdata <- transform(cdata, y1 = rpois(nn, lambda1),
+ y2 = rpois(nn, lambda2),
+ y3 = rpois(nn, lambda3))
\dontrun{ lvplot(p1, y = TRUE, lcol = 2:4, pch = 2:4, pcol = 2:4, rug = FALSE) }
}
\keyword{models}
\keyword{regression}
+%# vvv p1 = cqo(cbind(y1,y2,y3) ~ x2 + x3 + x4, poissonff, cdata,
+%# vvv EqualTol = FALSE, ITol = FALSE)
+%# vvv summary(p1) # # Three dispersion parameters are all unity
diff --git a/man/poissonp.Rd b/man/poissonp.Rd
index 24d1fd1..2b4134e 100644
--- a/man/poissonp.Rd
+++ b/man/poissonp.Rd
@@ -8,8 +8,8 @@
}
\usage{
-poissonp(ostatistic, dimension=2, link="loge", earg=list(),
- idensity=NULL, imethod=1)
+poissonp(ostatistic, dimension = 2, link = "loge",
+ idensity = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -29,11 +29,6 @@ poissonp(ostatistic, dimension=2, link="loge", earg=list(),
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{idensity}{
Optional initial value for the parameter.
A \code{NULL} value means a value is obtained internally.
@@ -97,13 +92,13 @@ poissonp(ostatistic, dimension=2, link="loge", earg=list(),
}
\examples{
-pdat = data.frame(y = rgamma(10, shape=exp(-1))) # Not good data!
-os = 2
-fit = vglm(y ~ 1, poissonp(os, 2), pdat, tra=TRUE, crit="c")
-fit = vglm(y ~ 1, poissonp(os, 3), pdat, tra=TRUE, crit="c") # Slow convergence?
-fit = vglm(y ~ 1, poissonp(os, 3, idensi=1), pdat, trace=TRUE, crit="c")
+pdata <- data.frame(y = rgamma(10, shape = exp(-1))) # Not proper data!
+os <- 2
+fit <- vglm(y ~ 1, poissonp(os, 2), pdata, tra = TRUE, crit = "c")
+fit <- vglm(y ~ 1, poissonp(os, 3), pdata, tra = TRUE, crit = "c") # Slow convergence?
+fit <- vglm(y ~ 1, poissonp(os, 3, idensi = 1), pdata, trace = TRUE, crit = "c")
head(fitted(fit))
-with(pdat, mean(y))
+with(pdata, mean(y))
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/polf.Rd b/man/polf.Rd
index 7de881d..8bc00e3 100644
--- a/man/polf.Rd
+++ b/man/polf.Rd
@@ -8,7 +8,7 @@
}
\usage{
-polf(theta, earg = stop("argument 'earg' must be given"),
+polf(theta, cutpoint = NULL,
inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,32 +18,16 @@ polf(theta, earg = stop("argument 'earg' must be given"),
See below for further details.
}
- \item{earg}{
- Extra argument for passing in additional information.
- This must be list with component \code{cutpoint}.
+ \item{cutpoint}{
The cutpoints should be non-negative integers.
If \code{polf()} is used as the link function in
\code{\link{cumulative}} then one should choose
\code{reverse = TRUE, parallel = TRUE, intercept.apply = TRUE}.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
-
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
-
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
}
@@ -55,8 +39,6 @@ polf(theta, earg = stop("argument 'earg' must be given"),
If the cutpoint is zero then a complementary log-log link is used.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
See \code{\link{Links}} for general information about \pkg{VGAM}
@@ -112,52 +94,47 @@ polf(theta, earg = stop("argument 'earg' must be given"),
}
\examples{
-earg = list(cutpoint = 2)
-polf("p", earg = earg, short = FALSE)
-polf("p", earg = earg, tag = TRUE)
+polf("p", cutpoint = 2, short = FALSE)
+polf("p", cutpoint = 2, tag = TRUE)
-p = seq(0.01, 0.99, by=0.01)
-y = polf(p, earg = earg)
-y. = polf(p, earg = earg, deriv = 1)
-max(abs(polf(y, earg = earg, inv = TRUE) - p)) # Should be 0
+p <- seq(0.01, 0.99, by = 0.01)
+y <- polf(p, cutpoint = 2)
+y. <- polf(p, cutpoint = 2, deriv = 1)
+max(abs(polf(y, cutpoint = 2, inv = TRUE) - p)) # Should be 0
-\dontrun{
-par(mfrow=c(2,1), las = 1)
+\dontrun{par(mfrow = c(2, 1), las = 1)
plot(p, y, type = "l", col = "blue", main = "polf()")
-abline(h=0, v=0.5, col = "red", lty = "dashed")
+abline(h = 0, v = 0.5, col = "orange", lty = "dashed")
plot(p, y., type = "l", col = "blue",
main = "(Reciprocal of) first POLF derivative") }
# Rutherford and Geiger data
-ruge = data.frame(yy = rep(0:14,
- times=c(57,203,383,525,532,408,273,139,45,27,10,4,0,1,1)))
-
+ruge <- data.frame(yy = rep(0:14,
+ times = c(57,203,383,525,532,408,273,139,45,27,10,4,0,1,1)))
with(ruge, length(yy)) # 2608 1/8-minute intervals
-cutpoint = 5
-ruge = transform(ruge, yy01 = ifelse(yy <= cutpoint, 0, 1))
-earg = list(cutpoint=cutpoint)
-fit = vglm(yy01 ~ 1, binomialff(link = "polf", earg = earg), ruge)
+cutpoint <- 5
+ruge <- transform(ruge, yy01 = ifelse(yy <= cutpoint, 0, 1))
+fit <- vglm(yy01 ~ 1, binomialff(link = polf(cutpoint = cutpoint)), ruge)
coef(fit, matrix = TRUE)
exp(coef(fit))
# Another example
-pdat = data.frame(x2 = sort(runif(nn <- 1000)))
-pdat = transform(pdat, x3 = runif(nn))
-pdat = transform(pdat, mymu = exp( 3 + 1 * x2 - 2 * x3))
-pdat = transform(pdat, y1 = rpois(nn, lambda=mymu))
-cutpoints = c(-Inf, 10, 20, Inf)
-pdat = transform(pdat, cuty = Cut(y1, breaks=cutpoints))
-\dontrun{
-with(pdat, plot(x2, x3, col=cuty, pch=as.character(cuty))) }
-with(pdat, table(cuty) / sum(table(cuty)))
-fit = vglm(cuty ~ x2 + x3, fam = cumulative(link = "polf",
- reverse = TRUE, parallel = TRUE, intercept.apply = TRUE,
- mv = TRUE, earg = list(cutpoint=cutpoints[2:3])),
- pdat, trace = TRUE)
-head(fit at y)
+pdata <- data.frame(x2 = sort(runif(nn <- 1000)))
+pdata <- transform(pdata, x3 = runif(nn))
+pdata <- transform(pdata, mymu = exp( 3 + 1 * x2 - 2 * x3))
+pdata <- transform(pdata, y1 = rpois(nn, lambda = mymu))
+cutpoints <- c(-Inf, 10, 20, Inf)
+pdata <- transform(pdata, cuty = Cut(y1, breaks = cutpoints))
+\dontrun{ with(pdata, plot(x2, x3, col = cuty, pch = as.character(cuty))) }
+with(pdata, table(cuty) / sum(table(cuty)))
+fit <- vglm(cuty ~ x2 + x3, cumulative(reverse = TRUE,
+ parallel = TRUE, intercept.apply = TRUE,
+ link = polf(cutpoint = cutpoints[2:3]),
+ mv = TRUE), data = pdata, trace = TRUE)
+head(depvar(fit))
head(fitted(fit))
head(predict(fit))
coef(fit)
diff --git a/man/polonoUC.Rd b/man/polonoUC.Rd
index 9bd31b0..19fc88f 100644
--- a/man/polonoUC.Rd
+++ b/man/polonoUC.Rd
@@ -122,7 +122,7 @@ rpolono(n, meanlog = 0, sdlog = 1)
}
\examples{
-meanlog = 0.5; sdlog = 0.5; yy = 0:19
+meanlog <- 0.5; sdlog <- 0.5; yy <- 0:19
sum(proby <- dpolono(yy, m = meanlog, sd = sdlog)) # Should be 1
max(abs(cumsum(proby) - ppolono(yy, m = meanlog, sd = sdlog))) # Should be 0
@@ -132,7 +132,7 @@ plot(yy, proby, type = "h", col = "blue", ylab = "P[Y=y]", log = "",
main = paste("Poisson lognormal(m = ", meanlog,
", sdl = ", sdlog, ")", sep = ""))
-y = 0:190 # More extreme values; use the approximation and plot on a log scale
+y <- 0:190 # More extreme values; use the approximation and plot on a log scale
(sum(proby <- dpolono(y, m = meanlog, sd = sdlog, bigx = 100))) # Should be 1
plot(y, proby, type = "h", col = "blue", ylab = "P[Y=y] (log)", log = "y",
main = paste("Poisson lognormal(m = ", meanlog,
diff --git a/man/posbinomUC.Rd b/man/posbinomUC.Rd
index 7a66855..0776803 100644
--- a/man/posbinomUC.Rd
+++ b/man/posbinomUC.Rd
@@ -103,13 +103,13 @@ rposbinom(n, size, prob)
}
\examples{
-prob = 0.2; size = 10
+prob <- 0.2; size <- 10
table(y <- rposbinom(n = 1000, size, prob))
-mean(y) # Sample mean
+mean(y) # Sample mean
size * prob / (1-(1-prob)^size) # Population mean
-(ii = dposbinom(0:size, size, prob))
-cumsum(ii) - pposbinom(0:size, size, prob) # Should be 0s
+(ii <- dposbinom(0:size, size, prob))
+cumsum(ii) - pposbinom(0:size, size, prob) # Should be 0s
table(rposbinom(100, size, prob))
table(qposbinom(runif(1000), size, prob))
@@ -118,24 +118,25 @@ round(dposbinom(1:10, size, prob) * 1000) # Should be similar
\dontrun{ barplot(rbind(dposbinom(x = 0:size, size, prob),
dbinom(x = 0:size, size, prob)),
beside = TRUE, col = c("blue", "green"),
- main=paste("Positive-binomial(", size, ",", prob, ") (blue) vs",
+ main = paste("Positive-binomial(", size, ",",
+ prob, ") (blue) vs",
" Binomial(", size, ",", prob, ") (green)", sep = ""),
names.arg = as.character(0:size), las = 1) }
# Simulated data example
-nn = 1000; sizeval1 = 10; sizeval2 = 20
-pdat <- data.frame(x2 = seq(0, 1, length = nn))
-pdat <- transform(pdat, prob1 = logit(-2 + 2 * x2, inverse = TRUE),
- prob2 = logit(-1 + 1 * x2, inverse = TRUE),
- sizev1 = rep(sizeval1, len = nn),
- sizev2 = rep(sizeval2, len = nn))
-pdat <- transform(pdat, y1 = rposbinom(nn, size = sizev1, prob = prob1),
- y2 = rposbinom(nn, size = sizev2, prob = prob2))
-with(pdat, table(y1))
-with(pdat, table(y2))
+nn <- 1000; sizeval1 <- 10; sizeval2 <- 20
+pdata <- data.frame(x2 = seq(0, 1, length = nn))
+pdata <- transform(pdata, prob1 = logit(-2 + 2 * x2, inverse = TRUE),
+ prob2 = logit(-1 + 1 * x2, inverse = TRUE),
+ sizev1 = rep(sizeval1, len = nn),
+ sizev2 = rep(sizeval2, len = nn))
+pdata <- transform(pdata, y1 = rposbinom(nn, size = sizev1, prob = prob1),
+ y2 = rposbinom(nn, size = sizev2, prob = prob2))
+with(pdata, table(y1))
+with(pdata, table(y2))
# Multivariate response
-fit2 = vglm(cbind(y1, y2) ~ x2, posbinomial(mv = TRUE),
- trace = TRUE, pdat, weight = cbind(sizev1, sizev2))
+fit2 <- vglm(cbind(y1, y2) ~ x2, posbinomial(mv = TRUE),
+ trace = TRUE, pdata, weight = cbind(sizev1, sizev2))
coef(fit2, matrix = TRUE)
}
\keyword{distribution}
diff --git a/man/posbinomial.Rd b/man/posbinomial.Rd
index f243fd6..ea97291 100644
--- a/man/posbinomial.Rd
+++ b/man/posbinomial.Rd
@@ -6,13 +6,12 @@
Fits a positive binomial distribution.
}
\usage{
-posbinomial(link = "logit", earg = list(),
- mv = FALSE, parallel = FALSE, zero = NULL)
+posbinomial(link = "logit", mv = FALSE, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and its extra argument for the usual probability parameter.
+ \item{link}{
+ Link function for the usual probability parameter.
See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -39,6 +38,7 @@ posbinomial(link = "logit", earg = list(),
}
\references{
+
Patil, G. P. (1962)
Maximum likelihood estimation for
generalised power series distributions and its application to a
@@ -47,11 +47,6 @@ truncated binomial distribution.
\bold{49}, 227--237.
-Documentation accompanying the \pkg{VGAM} package at
-\url{http://www.stat.auckland.ac.nz/~yee}
-contains further information and examples.
-
-
}
\author{ Thomas W. Yee }
@@ -88,11 +83,11 @@ contains further information and examples.
\examples{
# Number of albinotic children in families with 5 kids (from Patil, 1962)
-akids = data.frame(y = c(rep(1, 25), rep(2, 23), rep(3, 10), 4, 5),
- n = rep(5, 60))
-fit1 = vglm(cbind(y, n-y) ~ 1, posbinomial, akids, trace = TRUE)
+akids <- data.frame(y = c(rep(1, 25), rep(2, 23), rep(3, 10), 4, 5),
+ n = rep(5, 60))
+fit1 <- vglm(cbind(y, n-y) ~ 1, posbinomial, akids, trace = TRUE)
summary(fit1)
-Coef(fit1) # = MLE of p = 0.3088
+Coef(fit1) # = MLE of p = 0.3088
head(fitted(fit1))
}
\keyword{models}
diff --git a/man/posnegbinUC.Rd b/man/posnegbinUC.Rd
index 55fd3b4..790b98a 100644
--- a/man/posnegbinUC.Rd
+++ b/man/posnegbinUC.Rd
@@ -104,13 +104,13 @@ for counts with extra zeros.
\examples{
munb <- 5; size <- 4; n <- 1000
table(y <- rposnegbin(n, munb = munb, size = size))
-mean(y) # sample mean
+mean(y) # sample mean
munb / (1 - (size / (size + munb))^size) # population mean
munb / pnbinom(0, mu = munb, size = size, lower.tail = FALSE) # same as before
x <- (-1):17
(ii <- dposnegbin(x, munb = munb, size = size))
-max(abs(cumsum(ii) - pposnegbin(x, munb = munb, size = size))) # Should be 0
+max(abs(cumsum(ii) - pposnegbin(x, munb = munb, size = size))) # Should be 0
\dontrun{
x <- 0:10
@@ -125,7 +125,7 @@ barplot(rbind(dposnegbin(x, munb = munb, size = size),
nn <- 5000
mytab <- cumsum(table(rposnegbin(nn, munb = munb, size = size))) / nn
myans <- pposnegbin(sort(as.numeric(names(mytab))), munb = munb, size = size)
-max(abs(mytab - myans)) # Should be 0
+max(abs(mytab - myans)) # Should be 0
}
\keyword{distribution}
diff --git a/man/posnegbinomial.Rd b/man/posnegbinomial.Rd
index 4a8de87..a21da75 100644
--- a/man/posnegbinomial.Rd
+++ b/man/posnegbinomial.Rd
@@ -8,7 +8,7 @@
}
\usage{
-posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
+posnegbinomial(lmunb = "loge", lsize = "loge",
isize = NULL, zero = -2, nsimEIM = 250,
shrinkage.init = 0.95, imethod = 1)
@@ -20,16 +20,13 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
\eqn{\mu_{nb}}{munb} of an ordinary negative binomial distribution.
See \code{\link{Links}} for more choices.
+
}
\item{lsize}{
Parameter link function applied to the dispersion parameter,
called \code{k}.
See \code{\link{Links}} for more choices.
- }
- \item{emunb, esize}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
}
\item{isize}{
@@ -42,6 +39,7 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
A value \code{NULL} means an initial value for each response is
computed internally using a range of values.
+
}
\item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}}.
@@ -88,12 +86,19 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
occur. Possibly a loglog link could be added in the future to try help
handle this problem.
+
+ This \pkg{VGAM} family function is computationally expensive
+ and usually runs slowly;
+ setting \code{trace = TRUE} is useful for monitoring convergence.
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}} and \code{\link{vgam}}.
+
}
\references{
Barry, S. C. and Welsh, A. H. (2002)
@@ -103,6 +108,15 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
179--188.
+ Fisher, R. A., Corbet, A. S. and Williams, C. B. (1943)
+ The Relation Between the Number of Species and
+ the Number of Individuals in a Random Sample of an Animal
+ Population,
+ \emph{Journal of Animal Ecology},
+ \bold{12},
+ 42--58.
+
+
Williamson, E. and Bretherton, M. H. (1964)
Tables of the logarithmic series distribution.
\emph{Annals of Mathematical Statistics},
@@ -113,7 +127,8 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
}
\author{ Thomas W. Yee }
\note{
- This family function can handle a multivariate response.
+ This family function handles multiple responses.
+
}
@@ -126,13 +141,15 @@ posnegbinomial(lmunb = "loge", lsize = "loge", emunb = list(), esize = list(),
\code{\link[stats:NegBinomial]{rnbinom}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
-pdata <- data.frame(x = runif(nn <- 1000))
-pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x), size = exp(1)),
- y2 = rposnegbin(nn, munb = exp(1+2*x), size = exp(3)))
-fit <- vglm(cbind(y1, y2) ~ x, posnegbinomial, pdata, trace = TRUE)
+\dontrun{
+pdata <- data.frame(x2 = runif(nn <- 1000))
+pdata <- transform(pdata, y1 = rposnegbin(nn, munb = exp(0+2*x2), size = exp(1)),
+ y2 = rposnegbin(nn, munb = exp(1+2*x2), size = exp(3)))
+fit <- vglm(cbind(y1, y2) ~ x2, posnegbinomial, pdata, trace = TRUE)
coef(fit, matrix = TRUE)
dim(depvar(fit)) # dim(fit at y) is not as good
@@ -143,7 +160,7 @@ pdata2 <- transform(pdata2, y3 = rposnegbin(nn, munb = munb, size = size))
with(pdata2, table(y3))
fit <- vglm(y3 ~ 1, posnegbinomial, pdata2, trace = TRUE)
coef(fit, matrix = TRUE)
-with(pdata2, mean(y3)) # Sample mean
+with(pdata2, mean(y3)) # Sample mean
head(with(pdata2, munb/(1-(size/(size+munb))^size)), 1) # Population mean
head(fitted(fit), 3)
head(predict(fit), 3)
@@ -159,10 +176,11 @@ Coef(fit)
(khat <- Coef(fit)["size"])
pdf2 <- dposnegbin(x = with(corbet, nindiv), mu = fitted(fit), size = khat)
print( with(corbet, cbind(nindiv, ofreq, fitted = pdf2*sum(ofreq))), dig = 1)
-\dontrun{ with(corbet,
+with(corbet,
matplot(nindiv, cbind(ofreq, fitted = pdf2*sum(ofreq)), las = 1,
type = "b", ylab = "Frequency", col = c("blue", "orange"),
- main = "blue 1s = observe; orange 2s = fitted")) }
+ main = "blue 1s = observe; orange 2s = fitted"))
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/posnormUC.Rd b/man/posnormUC.Rd
index 2fd914b..ffa64d6 100644
--- a/man/posnormUC.Rd
+++ b/man/posnormUC.Rd
@@ -41,6 +41,7 @@ rposnorm(n, mean = 0, sd = 1)
for estimating the parameters,
for the formula of the probability density function and other details.
+
}
%\note{
@@ -50,15 +51,15 @@ rposnorm(n, mean = 0, sd = 1)
}
\examples{
-\dontrun{ m = 0.8; x = seq(-1, 4, len = 501)
+\dontrun{ m <- 0.8; x <- seq(-1, 4, len = 501)
plot(x, dposnorm(x, m = m), type = "l", ylim = 0:1, las = 1,
ylab = paste("posnorm(m = ", m, ", sd = 1)"), col = "blue",
- main = "Blue is density, red is cumulative distribution function",
+ main = "Blue is density, orange is cumulative distribution function",
sub = "Purple lines are the 10,20,...,90 percentiles")
-lines(x, pposnorm(x, m = m), col = "red")
-abline(h = 0)
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qposnorm(probs, m = m)
+lines(x, pposnorm(x, m = m), col = "orange")
+abline(h = 0, col = "grey")
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qposnorm(probs, m = m)
lines(Q, dposnorm(Q, m = m), col = "purple", lty = 3, type = "h")
lines(Q, pposnorm(Q, m = m), col = "purple", lty = 3, type = "h")
abline(h = probs, col = "purple", lty = 3)
diff --git a/man/posnormal1.Rd b/man/posnormal1.Rd
index d2109aa..3b3f750 100644
--- a/man/posnormal1.Rd
+++ b/man/posnormal1.Rd
@@ -6,7 +6,7 @@
Fits a positive (univariate) normal distribution.
}
\usage{
-posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
+posnormal1(lmean = "identity", lsd = "loge",
imean = NULL, isd = NULL, nsimEIM = 100, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,11 +18,16 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
See \code{\link{Links}} for more choices.
}
- \item{emean, esd}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+
+% \item{emean, esd}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+%emean = list(), esd = list(),
+%
+% }
+
+
\item{imean, isd}{
Optional initial values for \eqn{\mu}{mu} and \eqn{\sigma}{sigma}.
A \code{NULL} means a value is computed internally.
@@ -72,6 +77,8 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
+
}
\references{
@@ -79,6 +86,7 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
@@ -87,6 +95,7 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
Reasonably good initial values are needed.
Fisher scoring is implemented.
+
The distribution of the reciprocal of a positive normal random variable
is known as an alpha distribution.
@@ -95,6 +104,7 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
\section{Warning }{
Under- or over-flow may occur if the data is ill-conditioned.
+
}
\seealso{
\code{\link{normal1}},
@@ -104,15 +114,15 @@ posnormal1(lmean = "identity", lsd = "loge", emean = list(), esd = list(),
}
\examples{
-pdata = data.frame(m = 1.0, SD = exp(1.0))
-pdata = transform(pdata, y = rposnorm(n <- 1000, m = m, sd = SD))
+pdata <- data.frame(m = 1.0, SD = exp(1.0))
+pdata <- transform(pdata, y = rposnorm(n <- 1000, m = m, sd = SD))
\dontrun{with(pdata, hist(y, prob = TRUE, border = "blue",
main = paste("posnorm(m =", m[1], ", sd =", round(SD[1], 2),")"))) }
-fit = vglm(y ~ 1, fam = posnormal1, pdata, trace = TRUE)
+fit <- vglm(y ~ 1, fam = posnormal1, pdata, trace = TRUE)
coef(fit, matrix = TRUE)
-(Cfit = Coef(fit))
-mygrid = with(pdata, seq(min(y), max(y), len = 200)) # Add the fit to the histogram
+(Cfit <- Coef(fit))
+mygrid <- with(pdata, seq(min(y), max(y), len = 200)) # Add the fit to the histogram
\dontrun{lines(mygrid, dposnorm(mygrid, Cfit[1], Cfit[2]), col = "red")}
}
\keyword{models}
diff --git a/man/pospoisson.Rd b/man/pospoisson.Rd
index 83748d4..ee99ba6 100644
--- a/man/pospoisson.Rd
+++ b/man/pospoisson.Rd
@@ -6,13 +6,13 @@
Fits a positive Poisson distribution.
}
\usage{
-pospoisson(link = "loge", earg = list(),
- expected = TRUE, ilambda = NULL, imethod = 1)
+pospoisson(link = "loge", expected = TRUE,
+ ilambda = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument for the usual mean (lambda) parameter of
+ \item{link}{
+ Link function for the usual mean (lambda) parameter of
an ordinary Poisson distribution.
See \code{\link{Links}} for more choices.
@@ -22,7 +22,7 @@ pospoisson(link = "loge", earg = list(),
Fisher scoring is used if \code{expected = TRUE}, else Newton-Raphson.
}
- \item{ilambda, imethod}{
+ \item{ilambda, imethod, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
}
@@ -53,16 +53,19 @@ pospoisson(link = "loge", earg = list(),
The object is used by modelling functions such as \code{\link{vglm}},
\code{\link{rrvglm}} and \code{\link{vgam}}.
+
}
\references{
Coleman, J. S. and James, J. (1961)
The equilibrium size distribution of freely-forming groups.
\emph{Sociometry}, \bold{24}, 36--45.
+
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
@@ -79,21 +82,22 @@ contains further information and examples.
\code{\link{poissonff}},
\code{\link{zipoisson}}.
+
}
\examples{
# Data from Coleman and James (1961)
-cjdat = data.frame(y = 1:6, freq = c(1486, 694, 195, 37, 10, 1))
-fit = vglm(y ~ 1, pospoisson, cjdat, weights = freq)
+cjdata <- data.frame(y = 1:6, freq = c(1486, 694, 195, 37, 10, 1))
+fit <- vglm(y ~ 1, pospoisson, cjdata, weights = freq)
Coef(fit)
summary(fit)
fitted(fit)
-pdat = data.frame(x2 = runif(nn <- 1000)) # Artificial data
-pdat = transform(pdat, lambda = exp(1 - 2 * x2))
-pdat = transform(pdat, y1 = rpospois(nn, lambda))
-with(pdat, table(y1))
-fit = vglm(y1 ~ x2, pospoisson, pdat, trace = TRUE, crit = "coef")
-coef(fit, matrix=TRUE)
+pdata <- data.frame(x2 = runif(nn <- 1000)) # Artificial data
+pdata <- transform(pdata, lambda = exp(1 - 2 * x2))
+pdata <- transform(pdata, y1 = rpospois(nn, lambda))
+with(pdata, table(y1))
+fit <- vglm(y1 ~ x2, pospoisson, pdata, trace = TRUE, crit = "coef")
+coef(fit, matrix = TRUE)
}
\keyword{models}
\keyword{regression}
diff --git a/man/powl.Rd b/man/powl.Rd
index b7accdf..c1542a4 100644
--- a/man/powl.Rd
+++ b/man/powl.Rd
@@ -8,7 +8,7 @@
}
\usage{
-powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
+powl(theta, power = 1, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -18,31 +18,18 @@ powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- List. Extra argument for passing in additional information.
- Here, the component name \code{power} denotes the power or exponent.
- This component name should not be abbreviated.
+ \item{power}{
+ This denotes the power or exponent.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
+
}
\details{
The power link function raises a parameter by a certain value of
@@ -51,8 +38,7 @@ powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
problems, e.g., if \code{power=0.5} and \code{theta} is
negative.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
}
\value{
@@ -61,11 +47,13 @@ powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
And if \code{inverse = TRUE} then
\code{theta} raised to the power of \code{1/power}.
+
For \code{deriv = 1}, then the function returns
\emph{d} \code{theta} / \emph{d} \code{eta} as a function of \code{theta}
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
}
%\references{
% McCullagh, P. and Nelder, J. A. (1989)
@@ -79,6 +67,7 @@ powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
\code{theta} and \code{power}.
Consequently this link function should be used with caution.
+
}
\seealso{
@@ -86,24 +75,19 @@ powl(theta, earg = list(power=1), inverse = FALSE, deriv = 0,
\code{\link{loge}}.
}
\examples{
-powl("a", earg=list(power=2), short=FALSE, tag=TRUE)
-
+powl("a", power = 2, short = FALSE, tag = TRUE)
powl(x <- 1:5)
-powl(x, earg=list(power=2))
-
-earg=list(power=2)
-max(abs(powl(powl(x, earg=earg), earg=earg, inverse=TRUE) - x)) # Should be 0
-
-powl(x <- (-5):5, earg=list(power=0.5)) # Has NAs
+powl(x, power = 2)
+max(abs(powl(powl(x, power = 2), power = 2, inverse=TRUE) - x)) # Should be 0
+powl(x <- (-5):5, power = 0.5) # Has NAs
# 1/2 = 0.5
-pdat = data.frame(y = rbeta(n=1000, shape1=2^2, shape2=3^2))
-fit = vglm(y ~ 1, beta.ab(lshape1="powl", lshape2="powl",
- eshape1=list(power=0.5), i1=3,
- eshape2=list(power=0.5), i2=7), pdat)
-t(coef(fit, matrix=TRUE))
-Coef(fit) # Useful for intercept-only models
-vcov(fit, untrans=TRUE)
+pdata <- data.frame(y = rbeta(n = 1000, shape1 = 2^2, shape2 = 3^2))
+fit <- vglm(y ~ 1, beta.ab(lshape1 = powl(power = 0.5), i1 = 3,
+ lshape2 = powl(power = 0.5), i2 = 7), pdata)
+t(coef(fit, matrix = TRUE))
+Coef(fit) # Useful for intercept-only models
+vcov(fit, untransform = TRUE)
}
\keyword{math}
\keyword{models}
diff --git a/man/predictvglm.Rd b/man/predictvglm.Rd
index 176efe6..a27fda7 100644
--- a/man/predictvglm.Rd
+++ b/man/predictvglm.Rd
@@ -5,71 +5,100 @@
\description{
Predicted values based on a vector generalized linear model (VGLM)
object.
+
}
\usage{
-predictvglm(object, newdata = NULL,
- type = c("link", "response", "terms"),
+predictvglm(object, newdata = NULL,
+ type = c("link", "response", "terms"),
se.fit = FALSE, deriv = 0, dispersion = NULL,
untransform = FALSE, extra = object at extra, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
- Object of class inheriting from \code{"vlm"}.
+ Object of class inheriting from \code{"vlm"},
+ e.g., \code{\link{vglm}}.
+
}
\item{newdata}{
An optional data frame in which to look for variables with which
to predict. If omitted, the fitted linear predictors are used.
+
}
\item{type}{
- the type of prediction required. The default is the first one,
- meaning on the scale of the linear predictors. The alternative
- \code{"response"} is on the scale of the response variable, and
- depending on the family function, this may or may not be the mean.
- The \code{"terms"} option returns a matrix giving the fitted values
- of each term in the model formula on the linear predictor scale.
-
The value of this argument can be abbreviated.
+ The type of prediction required. The default is the first one,
+ meaning on the scale of the linear predictors.
+ This should be a \eqn{n \times M}{n x M} matrix.
+
+
+ The alternative \code{"response"} is on the scale of the
+ response variable, and depending on the family function,
+ this may or may not be the mean.
+ Often this is the fitted value, e.g.,
+ \code{fitted(vglmObject)}
+ (see \code{\link{fittedvlm}}).
+ Note that the response is output from the \code{@linkinv} slot,
+ where the \code{eta} argument is the \eqn{n \times M}{n x M} matrix
+ of linear predictors.
+
+
+ The \code{"terms"} option returns a matrix giving the
+ fitted values of each term in the model formula on the
+ linear predictor scale.
+ The terms have been centered.
+
}
\item{se.fit}{
logical: return standard errors?
+
}
\item{deriv}{
Non-negative integer. Currently this must be zero.
Later, this may be implemented for general values.
+
}
\item{dispersion}{
Dispersion parameter.
This may be inputted at this stage, but the default is to use
the dispersion parameter of the fitted model.
+
}
\item{extra}{
A list containing extra information.
This argument should be ignored.
+
}
\item{untransform}{
Logical. Reverses any parameter link function.
- This argument only works if \code{type = "link", se.fit = FALSE, deriv = 0}.
+ This argument only works if
+ \code{type = "link", se.fit = FALSE, deriv = 0}.
+
}
\item{\dots}{Arguments passed into \code{predictvlm}.
+
+
+
}
}
\details{
- Obtains predictions and optionally estimates standard errors
- of those
- predictions from a fitted vector generalized linear model
- (VGLM) object.
+ Obtains predictions and optionally estimates
+ standard errors of those predictions from a fitted
+ \code{\link{vglm}} object.
+
+
+ This code implements \emph{smart prediction} (see
+ \code{\link{smartpred}}).
+
- This code implements \emph{smart prediction}
- (see \code{\link{smartpred}}).
}
\value{
@@ -80,13 +109,16 @@ predictvglm(object, newdata = NULL,
\item{df}{Degrees of freedom}
\item{sigma}{The square root of the dispersion parameter}
+
}
\references{
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
}
\author{ Thomas W. Yee }
@@ -94,11 +126,13 @@ Reduced-rank vector generalized linear models.
Setting \code{se.fit = TRUE} and \code{type = "response"}
will generate an error.
+
}
\section{Warning }{
This function may change in the future.
+
}
\seealso{
@@ -107,31 +141,32 @@ Reduced-rank vector generalized linear models.
\code{predictvlm},
\code{\link{smartpred}}.
+
}
\examples{
# Illustrates smart prediction
-pneumo = transform(pneumo, let = log(exposure.time))
-fit = vglm(cbind(normal,mild, severe) ~ poly(c(scale(let)), 2),
- propodds, data = pneumo, trace = TRUE, x = FALSE)
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ poly(c(scale(let)), 2),
+ propodds, data = pneumo, trace = TRUE, x.arg = FALSE)
class(fit)
-(q0 = head(predict(fit)))
-(q1 = predict(fit, newdata = head(pneumo)))
-(q2 = predict(fit, newdata = head(pneumo)))
-all.equal(q0, q1) # Should be TRUE
-all.equal(q1, q2) # Should be TRUE
+(q0 <- head(predict(fit)))
+(q1 <- predict(fit, newdata = head(pneumo)))
+(q2 <- predict(fit, newdata = head(pneumo)))
+all.equal(q0, q1) # Should be TRUE
+all.equal(q1, q2) # Should be TRUE
head(predict(fit))
head(predict(fit, untransform = TRUE))
-p0 = head(predict(fit, type = "res"))
-p1 = head(predict(fit, type = "res", newdata = pneumo))
-p2 = head(predict(fit, type = "res", newdata = pneumo))
-p3 = head(fitted(fit))
-all.equal(p0, p1) # Should be TRUE
-all.equal(p1, p2) # Should be TRUE
-all.equal(p2, p3) # Should be TRUE
+p0 <- head(predict(fit, type = "response"))
+p1 <- head(predict(fit, type = "response", newdata = pneumo))
+p2 <- head(predict(fit, type = "response", newdata = pneumo))
+p3 <- head(fitted(fit))
+all.equal(p0, p1) # Should be TRUE
+all.equal(p1, p2) # Should be TRUE
+all.equal(p2, p3) # Should be TRUE
predict(fit, type = "terms", se = TRUE)
}
diff --git a/man/prentice74.Rd b/man/prentice74.Rd
index 3f55c14..af31dce 100644
--- a/man/prentice74.Rd
+++ b/man/prentice74.Rd
@@ -5,10 +5,10 @@
\description{
Estimation of a 3-parameter log-gamma distribution described by
Prentice (1974).
+
}
\usage{
prentice74(llocation = "identity", lscale = "loge", lshape = "identity",
- elocation = list(), escale = list(), eshape = list(),
ilocation = NULL, iscale = NULL, ishape = NULL, zero = 2:3)
}
%- maybe also 'usage' for other objects documented here.
@@ -21,11 +21,6 @@ prentice74(llocation = "identity", lscale = "loge", lshape = "identity",
See \code{\link{Links}} for more choices.
}
- \item{elocation, escale, eshape}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation, iscale}{
Initial value for \eqn{a} and \eqn{b}, respectively.
The defaults mean an initial value is determined internally for each.
@@ -93,6 +88,7 @@ else \eqn{q < 0} is right skew.
The special case \eqn{q = 0} is not handled, therefore
estimates of \eqn{q} too close to zero may cause numerical problems.
+
}
\author{ T. W. Yee }
\note{
@@ -101,19 +97,21 @@ else \eqn{q < 0} is right skew.
\eqn{\sigma = b}{sigma = b}.
Fisher scoring is used.
+
}
\seealso{
\code{\link{lgamma3ff}},
\code{\link[base:Special]{lgamma}},
\code{\link{gengamma}}.
+
}
\examples{
-pdat = data.frame(x = runif(nn <- 1000))
-pdat = transform(pdat, loc = -1 + 2*x, Scale = exp(1))
-pdat = transform(pdat, y = rlgamma(nn, loc = loc, scale = Scale, k = 1))
-fit = vglm(y ~ x, prentice74(zero = 2:3), pdat, trace = TRUE)
-coef(fit, matrix = TRUE) # Note the coefficients for location
+pdata <- data.frame(x2 = runif(nn <- 1000))
+pdata <- transform(pdata, loc = -1 + 2*x2, Scale = exp(1))
+pdata <- transform(pdata, y = rlgamma(nn, loc = loc, scale = Scale, k = 1))
+fit <- vglm(y ~ x2, prentice74(zero = 2:3), pdata, trace = TRUE)
+coef(fit, matrix = TRUE) # Note the coefficients for location
}
\keyword{models}
\keyword{regression}
diff --git a/man/probit.Rd b/man/probit.Rd
index 1241b9c..45cc882 100644
--- a/man/probit.Rd
+++ b/man/probit.Rd
@@ -7,7 +7,7 @@
first two derivatives.
}
\usage{
-probit(theta, earg = list(), inverse = FALSE, deriv = 0,
+probit(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
@@ -17,37 +17,17 @@ probit(theta, earg = list(), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
- replaced by 1 minus the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}}.
- }
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a \code{\link{vglmff-class}}
- object.
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
- }
}
\details{
The probit link function is commonly used for parameters that
@@ -55,8 +35,7 @@ probit(theta, earg = list(), inverse = FALSE, deriv = 0,
Numerical values of \code{theta} close to 0 or 1 or out of range
result in
\code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
- The arguments \code{short} and \code{tag} are used only if
- \code{theta} is character.
+
}
@@ -83,7 +62,7 @@ probit(theta, earg = list(), inverse = FALSE, deriv = 0,
\note{
Numerical instability may occur when \code{theta} is close to 1 or 0.
- One way of overcoming this is to use \code{earg}.
+ One way of overcoming this is to use \code{bvalue}.
In terms of the threshold approach with cumulative probabilities for
@@ -101,15 +80,15 @@ probit(theta, earg = list(), inverse = FALSE, deriv = 0,
}
\examples{
-p = seq(0.01, 0.99, by = 0.01)
+p <- seq(0.01, 0.99, by = 0.01)
probit(p)
max(abs(probit(probit(p), inverse = TRUE) - p)) # Should be 0
-p = c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
-probit(p) # Has NAs
-probit(p, earg = list(bvalue = .Machine$double.eps)) # Has no NAs
+p <- c(seq(-0.02, 0.02, by = 0.01), seq(0.97, 1.02, by = 0.01))
+probit(p) # Has NAs
+probit(p, bvalue = .Machine$double.eps) # Has no NAs
-\dontrun{p = seq(0.01, 0.99, by = 0.01); par(lwd = (mylwd <- 2))
+\dontrun{p <- seq(0.01, 0.99, by = 0.01); par(lwd = (mylwd <- 2))
plot(p, logit(p), type = "l", col = "limegreen", ylab = "transformation",
las = 1, main = "Some probability link functions")
lines(p, probit(p), col = "purple")
@@ -117,7 +96,7 @@ lines(p, cloglog(p), col = "chocolate")
lines(p, cauchit(p), col = "tan")
abline(v = 0.5, h = 0, lty = "dashed")
legend(0.1, 4.0, c("logit", "probit", "cloglog", "cauchit"),
- col = c("limegreen","purple","chocolate","tan"), lwd = mylwd)
+ col = c("limegreen", "purple", "chocolate", "tan"), lwd = mylwd)
par(lwd = 1) }
}
\keyword{math}
diff --git a/man/propodds.Rd b/man/propodds.Rd
index dcb782f..cdeb047 100644
--- a/man/propodds.Rd
+++ b/man/propodds.Rd
@@ -69,7 +69,8 @@ contains further information and examples.
\author{ Thomas W. Yee }
\section{Warning }{
- No check is made to verify that the response is ordinal;
+ No check is made to verify that the response is ordinal if the
+ response is a matrix;
see \code{\link[base:factor]{ordered}}.
@@ -82,8 +83,8 @@ contains further information and examples.
}
\examples{
# Fit the proportional odds model, p.179, in McCullagh and Nelder (1989)
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo))
depvar(fit) # Sample proportions
weights(fit, type = "prior") # Number of observations
coef(fit, matrix = TRUE)
@@ -91,16 +92,17 @@ constraints(fit) # Constraint matrices
summary(fit)
# Check that the model is linear in let ----------------------
-fit2 = vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, pneumo)
+fit2 <- vgam(cbind(normal, mild, severe) ~ s(let, df = 2), propodds, pneumo)
\dontrun{ plot(fit2, se = TRUE, lcol = 2, scol = 2) }
# Check the proportional odds assumption with a LRT ----------
-(fit3 = vglm(cbind(normal, mild, severe) ~ let,
- cumulative(parallel = FALSE, reverse = TRUE), pneumo))
+(fit3 <- vglm(cbind(normal, mild, severe) ~ let,
+ cumulative(parallel = FALSE, reverse = TRUE), pneumo))
pchisq(deviance(fit) - deviance(fit3),
df = df.residual(fit) - df.residual(fit3), lower.tail = FALSE)
+lrtest(fit3, fit) # Easier
}
\keyword{models}
\keyword{regression}
-% pneumo$let = log(pneumo$exposure.time)
+% pneumo$let <- log(pneumo$exposure.time)
diff --git a/man/prplot.Rd b/man/prplot.Rd
index 3b5a29b..c1d4c77 100644
--- a/man/prplot.Rd
+++ b/man/prplot.Rd
@@ -83,12 +83,12 @@ prplot.control(xlab = NULL, ylab = "Probability", main = NULL, xlim = NULL,
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
-fit = vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
-M = fit at misc$M
+pneumo <- transform(pneumo, let = log(exposure.time))
+fit <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
+M <- npred(fit) # Or fit at misc$M
\dontrun{ prplot(fit)
prplot(fit, lty = 1:M, col = (1:M)+2, rug = TRUE, las = 1,
- ylim = c(0,1), rlwd = 2) }
+ ylim = c(0, 1), rlwd = 2) }
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
diff --git a/man/qrrvglm.control.Rd b/man/qrrvglm.control.Rd
index da9f5fe..57c0caf 100644
--- a/man/qrrvglm.control.Rd
+++ b/man/qrrvglm.control.Rd
@@ -459,26 +459,27 @@ Constrained additive ordination.
}
\examples{
-# Poisson CQO with equal tolerances
-set.seed(111) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+\dontrun{ # Poisson CQO with equal tolerances
+set.seed(111) # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6]) # Good idea when ITolerances = TRUE
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- quasipoissonff, data = hspider, EqualTolerances = TRUE)
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ quasipoissonff, data = hspider, EqualTolerances = TRUE)
sort(p1 at misc$deviance.Bestof) # A history of all the iterations
-(isdlv = apply(lv(p1), 2, sd)) # Should be approx isdlv
+(isdlv <- apply(lv(p1), 2, sd)) # Should be approx isdlv
# Refit the model with better initial values
set.seed(111) # This leads to the global solution
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- ITolerances = TRUE, isdlv = isdlv, # Note the use of isdlv here
- fam = quasipoissonff, data = hspider)
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITolerances = TRUE, isdlv = isdlv, # Note the use of isdlv here
+ quasipoissonff, data = hspider)
sort(p1 at misc$deviance.Bestof) # A history of all the iterations
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/qtplot.gumbel.Rd b/man/qtplot.gumbel.Rd
index 0c1c95b..70566ba 100644
--- a/man/qtplot.gumbel.Rd
+++ b/man/qtplot.gumbel.Rd
@@ -18,18 +18,28 @@ qtplot.gumbel(object, plot.it = TRUE,
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{object}{ A \pkg{VGAM} extremes model of the
- Gumbel type, produced by modelling functions such as \code{\link{vglm}}
+ \item{object}{
+ A \pkg{VGAM} extremes model of the
+ Gumbel type, produced by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}} with a family function either
- \code{"gumbel"} or \code{"egumbel"}. }
+ \code{"gumbel"} or \code{"egumbel"}.
+
+ }
\item{plot.it}{
Logical. Plot it? If \code{FALSE} no plot will be done.
+
+ }
+ \item{y.arg}{
+ Logical. Add the raw data on to the plot?
+
}
- \item{y.arg}{ Logical. Add the raw data on to the plot? }
- \item{spline.fit}{ Logical. Use a spline fit through the fitted
- percentiles? This can be useful if there are large gaps
- between some values along the covariate.
+ \item{spline.fit}{
+ Logical. Use a spline fit through the fitted
+ percentiles? This can be useful if there are large gaps
+ between some values along the covariate.
+
+
}
\item{label}{ Logical. Label the percentiles? }
\item{R}{ See \code{\link{gumbel}}. }
@@ -57,9 +67,12 @@ qtplot.gumbel(object, plot.it = TRUE,
\item{tadj}{ Text justification.
See the \code{adj} argument of \code{\link[graphics]{par}}.
}
- \item{\dots}{ Arguments passed into the \code{plot} function
+ \item{\dots}{
+ Arguments passed into the \code{plot} function
when setting up the entire plot. Useful arguments here include
\code{sub} and \code{las}.
+
+
}
@@ -67,41 +80,51 @@ qtplot.gumbel(object, plot.it = TRUE,
\details{
There should be a single covariate such as time.
The quantiles specified by \code{percentiles} are plotted.
+
+
}
\value{
The object with a list called \code{qtplot} in the \code{post}
slot of \code{object}.
- (If \code{plot.it=FALSE} then just the list is returned.)
+ (If \code{plot.it = FALSE} then just the list is returned.)
The list contains components
- \item{fitted.values}{ The percentiles of the response,
- possibly including the MPV. }
- \item{percentiles }{The percentiles (small vector of values between
- 0 and 100. }
+ \item{fitted.values}{
+ The percentiles of the response,
+ possibly including the MPV.
+
+ }
+ \item{percentiles}{
+ The percentiles (small vector of values between 0 and 100.
+
+
+ }
}
%\references{ ~put references to the literature/web site here ~ }
\author{ Thomas W. Yee }
\note{
Unlike \code{\link{gumbel}}, one cannot have
- \code{percentiles=NULL}.
+ \code{percentiles = NULL}.
+
}
\seealso{
\code{\link{gumbel}}.
+
+
}
\examples{
-y = as.matrix(venice[,paste("r",1:10,sep="")])
-fit1 = vgam(y ~ s(year, df=3), gumbel(R=365, mpv=TRUE),
- data=venice, trace=TRUE, na.action=na.pass)
+ymat <- as.matrix(venice[, paste("r", 1:10, sep = "")])
+fit1 <- vgam(ymat ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
+ data = venice, trace = TRUE, na.action = na.pass)
head(fitted(fit1))
-\dontrun{
-par(mfrow=c(1,1), bty="l", xpd=TRUE, las=1)
-qtplot(fit1, mpv=TRUE, lcol=c(1,2,5), tcol=c(1,2,5), lwd=2,
- pcol="blue", tadj=0.4)
+\dontrun{ par(mfrow = c(1, 1), bty = "l", xpd = TRUE, las = 1)
+qtplot(fit1, mpv = TRUE, lcol = c(1, 2, 5), tcol = c(1, 2, 5),
+ lwd = 2, pcol = "blue", tadj = 0.4, ylab = "Sea level (cm)")
-qtplot(fit1, perc=97, mpv=FALSE, lcol=3, tcol=3,
- lwd=2, tadj=0.4, add=TRUE) -> i
-head(i at post$qtplot$fitted)
+qtplot(fit1, perc = 97, mpv = FALSE, lcol = 3, tcol = 3,
+ lwd = 2, tadj = 0.4, add = TRUE) -> saved
+head(saved at post$qtplot$fitted)
}
}
\keyword{graphs}
diff --git a/man/qtplot.lmscreg.Rd b/man/qtplot.lmscreg.Rd
index 313fed4..0009d78 100644
--- a/man/qtplot.lmscreg.Rd
+++ b/man/qtplot.lmscreg.Rd
@@ -69,9 +69,9 @@ contains further information and examples.
}
\examples{\dontrun{
-fit = vgam(BMI ~ s(age, df=c(4,2)), fam=lms.bcn(zero=1), data=bmi.nz)
+fit <- vgam(BMI ~ s(age, df = c(4, 2)), lms.bcn(zero=1), data = bmi.nz)
qtplot(fit)
-qtplot(fit, perc=c(25,50,75,95), lcol="blue", tcol="blue", llwd=2)
+qtplot(fit, perc = c(25, 50, 75, 95), lcol = "blue", tcol = "blue", llwd = 2)
}
}
\keyword{graphs}
diff --git a/man/quasibinomialff.Rd b/man/quasibinomialff.Rd
index 2ea59b8..26a0453 100644
--- a/man/quasibinomialff.Rd
+++ b/man/quasibinomialff.Rd
@@ -23,10 +23,12 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
of the response matrix. In this case, the response matrix should have
zero/one values only.
+
If \code{FALSE} and the response is a (2-column) matrix, then the
number of successes is given in the first column and the second column
is the number of failures.
+
}
\item{onedpar}{
One dispersion parameter? If \code{mv}, then a separate dispersion
@@ -34,6 +36,7 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
Setting \code{onedpar=TRUE} will pool them so that there is only one
dispersion parameter to be estimated.
+
}
\item{parallel}{
A logical or formula. Used only if \code{mv} is \code{TRUE}. This
@@ -41,6 +44,7 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
coefficients for a variable is constrained to be equal over the \eqn{M}
linear/additive predictors.
+
}
\item{zero}{
An integer-valued vector specifying which linear/additive predictors
@@ -48,6 +52,7 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
\{1,2,\ldots,\eqn{M}\}, where \eqn{M} is the number of columns of
the matrix response.
+
}
}
\details{
@@ -55,15 +60,18 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
dispersion parameter is unknown (see pp.124--8 of McCullagh and Nelder
(1989) for more details).
+
A dispersion parameter that is less/greater than unity corresponds to
under-/over-dispersion relative to the binomial model. Over-dispersion
is more common in practice.
+
Setting \code{mv=TRUE} is necessary when fitting a Quadratic RR-VGLM
(see \code{\link{cqo}}) because the response will be a matrix of
\eqn{M} columns (e.g., one column per species). Then there will be
\eqn{M} dispersion parameters (one per column of the response).
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -74,10 +82,13 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
\code{\link{cqo}},
and \code{\link{cao}}.
+
}
\references{
- McCullagh, P. and Nelder, J. A. (1989)
+ McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
\author{ Thomas W. Yee }
\note{
@@ -89,23 +100,38 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
of proportions, you will need to specify \code{weights} because the
number of trials is needed.
+
If \code{mv} is \code{TRUE}, then the matrix response can only be of
one format: a matrix of 1's and 0's (1=success).
+
This function is only a front-end to the \pkg{VGAM} family function
\code{binomialff()}; indeed, \code{quasibinomialff(...)} is equivalent
to \code{binomialff(..., dispersion=0)}. Here, the argument
\code{dispersion=0} signifies that the dispersion parameter is to
be estimated.
+
Regardless of whether the dispersion parameter is to be estimated or
not, its value can be seen from the output from the \code{summary()}
of the object.
+
% With the introduction of name spaces for the \pkg{VGAM} package,
% \code{"ff"} can be dropped for this family function.
+
+}
+
+\section{Warning }{
+The log-likelihood pertaining to the ordinary family
+is used to test for convergence during estimation,
+and is printed out in the summary.
+
+
}
+
+
\seealso{
\code{\link{binomialff}},
\code{\link{rrvglm}},
@@ -118,27 +144,29 @@ quasibinomialff(link = "logit", mv = FALSE, onedpar = !mv,
\code{\link{poissonff}},
\code{\link{quasipoissonff}},
\code{\link[stats]{quasibinomial}}.
+
+
}
\examples{
quasibinomialff()
-quasibinomialff(link="probit")
+quasibinomialff(link = "probit")
# Nonparametric logistic regression
-hunua = transform(hunua, a.5 = sqrt(altitude)) # Transformation of altitude
-fit1 = vglm(agaaus ~ poly(a.5, 2), quasibinomialff, hunua)
-fit2 = vgam(agaaus ~ s(a.5, df=2), quasibinomialff, hunua)
+hunua <- transform(hunua, a.5 = sqrt(altitude)) # Transformation of altitude
+fit1 <- vglm(agaaus ~ poly(a.5, 2), quasibinomialff, hunua)
+fit2 <- vgam(agaaus ~ s(a.5, df = 2), quasibinomialff, hunua)
\dontrun{
-plot(fit2, se=TRUE, llwd=2, lcol="red", scol="red",
- xlab="sqrt(altitude)", ylim=c(-3,1),
- main="GAM and quadratic GLM fitted to species data")
-plotvgam(fit1, se=TRUE, lcol="blue", scol="blue", add=TRUE, llwd=2)
+plot(fit2, se = TRUE, llwd = 2, lcol = "orange", scol = "orange",
+ xlab = "sqrt(altitude)", ylim = c(-3, 1),
+ main = "GAM and quadratic GLM fitted to species data")
+plotvgam(fit1, se = TRUE, lcol = "blue", scol = "blue", add = TRUE, llwd = 2)
}
-fit1 at misc$dispersion # dispersion parameter
+fit1 at misc$dispersion # dispersion parameter
logLik(fit1)
# Here, the dispersion parameter defaults to 1
-fit0 = vglm(agaaus ~ poly(a.5, 2), binomialff, hunua)
-fit0 at misc$dispersion # dispersion parameter
+fit0 <- vglm(agaaus ~ poly(a.5, 2), binomialff, hunua)
+fit0 at misc$dispersion # dispersion parameter
}
\keyword{models}
\keyword{regression}
diff --git a/man/quasipoissonff.Rd b/man/quasipoissonff.Rd
index 4120e59..a6b5cb6 100644
--- a/man/quasipoissonff.Rd
+++ b/man/quasipoissonff.Rd
@@ -66,10 +66,13 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\code{\link{cqo}},
and \code{\link{cao}}.
+
}
\references{
- McCullagh, P. and Nelder, J. A. (1989)
+ McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
\author{ Thomas W. Yee }
@@ -94,6 +97,13 @@ quasipoissonff(link = "loge", onedpar = FALSE,
}
+
+\section{Warning }{
+ See the warning in \code{\link{quasibinomialff}}.
+
+
+}
+
\seealso{
\code{\link{poissonff}},
\code{\link{negbinomial}},
@@ -105,20 +115,19 @@ quasipoissonff(link = "loge", onedpar = FALSE,
\code{\link{quasibinomialff}},
\code{\link[stats]{quasipoisson}}.
+
}
\examples{
quasipoissonff()
-\dontrun{
-n = 200; p = 5; S = 5
-mydata = rcqo(n, p, S, fam="poisson", EqualTol=FALSE)
-myform = attr(mydata, "formula")
-p1 = cqo(myform, fam=quasipoissonff, EqualTol=FALSE, data=mydata)
+\dontrun{n <- 200; p <- 5; S <- 5
+mydata <- rcqo(n, p, S, fam = "poisson", EqualTol = FALSE)
+myform <- attr(mydata, "formula")
+p1 <- cqo(myform, fam = quasipoissonff, EqualTol = FALSE, data = mydata)
sort(p1 at misc$deviance.Bestof) # A history of all the iterations
-lvplot(p1, y=TRUE, lcol=1:S, pch=1:S, pcol=1:S)
-summary(p1) # The dispersion parameters are estimated
-}
-}
+lvplot(p1, y = TRUE, lcol = 1:S, pch = 1:S, pcol = 1:S)
+summary(p1) # The dispersion parameters are estimated
+}}
\keyword{models}
\keyword{regression}
diff --git a/man/rayleigh.Rd b/man/rayleigh.Rd
index 8dad6d0..80954e3 100644
--- a/man/rayleigh.Rd
+++ b/man/rayleigh.Rd
@@ -9,8 +9,9 @@
}
\usage{
- rayleigh(lscale = "loge", escale = list(), nrfs = 1/3 + 0.01)
-cenrayleigh(lscale = "loge", escale = list(), oim = TRUE)
+ rayleigh(lscale = "loge", nrfs = 1/3 + 0.01,
+ oim.mean = TRUE, zero = NULL)
+cenrayleigh(lscale = "loge", oim = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,12 +21,6 @@ cenrayleigh(lscale = "loge", escale = list(), oim = TRUE)
A log link is the default because \eqn{b} is positive.
}
- \item{escale}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information, as well
- as \code{\link{CommonVGAMffArguments}}.
-
- }
\item{nrfs}{
Numeric, of length one, with value in \eqn{[0,1]}.
Weighting factor between Newton-Raphson and Fisher scoring.
@@ -34,6 +29,14 @@ cenrayleigh(lscale = "loge", escale = list(), oim = TRUE)
positive-definite working weights.
}
+ \item{oim.mean}{
+ Logical, used only for intercept-only models.
+ \code{TRUE} means the mean of the OIM elements is used as the working weights.
+ If \code{TRUE} then this argument has top priority for working
+ out the working weights.
+ \code{FALSE} means use another algorithm.
+
+ }
\item{oim}{
Logical.
For censored data only,
@@ -41,6 +44,10 @@ cenrayleigh(lscale = "loge", escale = list(), oim = TRUE)
\code{FALSE} means Fisher scoring.
}
+ \item{zero}{
+ Details at \code{\link{CommonVGAMffArguments}}.
+
+ }
}
\details{
The Rayleigh distribution, which is used in physics,
@@ -63,10 +70,14 @@ cenrayleigh(lscale = "loge", escale = list(), oim = TRUE)
in the \code{extra} slot.
+ The \pkg{VGAM} family function \code{rayleigh} handles multiple responses.
+
+
}
\section{Warning}{
The theory behind the argument \code{oim} is not fully complete.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
diff --git a/man/rayleighUC.Rd b/man/rayleighUC.Rd
index 3c543e8..312169a 100644
--- a/man/rayleighUC.Rd
+++ b/man/rayleighUC.Rd
@@ -9,6 +9,7 @@
Density, distribution function, quantile function and random
generation for the Rayleigh distribution with parameter
\code{a}.
+
}
\usage{
drayleigh(x, scale = 1, log = FALSE)
@@ -36,6 +37,7 @@ rrayleigh(n, scale = 1)
\code{qrayleigh} gives the quantile function, and
\code{rrayleigh} generates random deviates.
+
}
\references{
@@ -43,6 +45,7 @@ Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
+
}
\author{ T. W. Yee }
\details{
@@ -52,23 +55,26 @@ New York: Wiley-Interscience, Third edition.
probability density function and range restrictions on
the parameter \eqn{b}.
+
}
\note{
The Rayleigh distribution is related to the Maxwell distribution.
+
}
\seealso{
\code{\link{rayleigh}},
\code{\link{maxwell}}.
+
}
\examples{
-\dontrun{ Scale = 2; x = seq(-1, 8, by = 0.1)
+\dontrun{ Scale <- 2; x <- seq(-1, 8, by = 0.1)
plot(x, drayleigh(x, scale = Scale), type = "l", ylim = c(0,1),
las = 1, ylab = "",
main = "Rayleigh density divided into 10 equal areas; orange = cdf")
abline(h = 0, col = "blue", lty = 2)
-qq = qrayleigh(seq(0.1, 0.9, by = 0.1), scale = Scale)
+qq <- qrayleigh(seq(0.1, 0.9, by = 0.1), scale = Scale)
lines(qq, drayleigh(qq, scale = Scale), col = "purple", lty = 3, type = "h")
lines(x, prayleigh(x, scale = Scale), col = "orange") }
}
diff --git a/man/rcqo.Rd b/man/rcqo.Rd
index ebb16fb..395a56d 100644
--- a/man/rcqo.Rd
+++ b/man/rcqo.Rd
@@ -338,6 +338,7 @@ A theory of gradient analysis.
Yet to do: add an argument that allows absences to be equal
to the first level if ordinal data is requested.
+
}
\seealso{
\code{\link{cqo}},
@@ -349,52 +350,50 @@ A theory of gradient analysis.
\code{\link{gamma2}},
\code{\link{gaussianff}}.
+
}
\examples{
+\dontrun{
# Example 1: Species packing model:
-n = 100; p = 5; S = 5
-mydata = rcqo(n, p, S, ESOpt = TRUE, EqualMax = TRUE)
+n <- 100; p <- 5; S <- 5
+mydata <- rcqo(n, p, S, ESOpt = TRUE, EqualMax = TRUE)
names(mydata)
-(myform = attr(mydata, "formula"))
-fit = cqo(myform, poissonff, mydata, Bestof = 3) # EqualTol = TRUE
-\dontrun{
-matplot(attr(mydata, "lv"), mydata[,-(1:(p-1))], col=1:S)
-persp(fit, col=1:S, add = TRUE)
-lvplot(fit, lcol=1:S, y = TRUE, pcol=1:S) # The same plot as above
-}
+(myform <- attr(mydata, "formula"))
+fit <- cqo(myform, poissonff, mydata, Bestof = 3) # EqualTol = TRUE
+matplot(attr(mydata, "lv"), mydata[,-(1:(p-1))], col = 1:S)
+persp(fit, col = 1:S, add = TRUE)
+lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # The same plot as above
# Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
+ccoef(fit) # The fitted model
attr(mydata, "ccoefficients") # The 'truth'
c(apply(attr(mydata, "lv"), 2, sd), apply(lv(fit), 2, sd)) # Both values should be approx equal
# Example 2: negative binomial data fitted using a Poisson model:
-n = 200; p = 5; S = 5
-mydata = rcqo(n, p, S, fam="negbin", sqrt = TRUE)
-myform = attr(mydata, "formula")
-fit = cqo(myform, fam=poissonff, dat=mydata) # ITol = TRUE,
-\dontrun{
-lvplot(fit, lcol=1:S, y = TRUE, pcol=1:S) }
+n <- 200; p <- 5; S <- 5
+mydata <- rcqo(n, p, S, fam = "negbin", sqrt = TRUE)
+myform <- attr(mydata, "formula")
+fit <- cqo(myform, fam = poissonff, dat = mydata) # ITol = TRUE,
+lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S)
# Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
+ccoef(fit) # The fitted model
attr(mydata, "ccoefficients") # The 'truth'
# Example 3: gamma2 data fitted using a Gaussian model:
-n = 200; p = 5; S = 3
-mydata = rcqo(n, p, S, fam="gamma2", Log = TRUE)
-fit = cqo(attr(mydata, "formula"), fam=gaussianff, dat=mydata) # ITol=TRUE,
-\dontrun{
-matplot(attr(mydata, "lv"), exp(mydata[,-(1:(p-1))]), col=1:S) # 'raw' data
-lvplot(fit, lcol=1:S, y=TRUE, pcol=1:S) # Fitted model to transformed data
-}
+n <- 200; p <- 5; S <- 3
+mydata <- rcqo(n, p, S, fam = "gamma2", Log = TRUE)
+fit <- cqo(attr(mydata, "formula"), fam = gaussianff, dat = mydata) # ITol = TRUE,
+matplot(attr(mydata, "lv"), exp(mydata[,-(1:(p-1))]), col = 1:S) # 'raw' data
+lvplot(fit, lcol = 1:S, y = TRUE, pcol = 1:S) # Fitted model to transformed data
# Compare the fitted model with the 'truth'
-ccoef(fit) # The fitted model
+ccoef(fit) # The fitted model
attr(mydata, "ccoefficients") # The 'truth'
}
+}
\keyword{distribution}
diff --git a/man/rdiric.Rd b/man/rdiric.Rd
index 44fbd39..a3b93af 100644
--- a/man/rdiric.Rd
+++ b/man/rdiric.Rd
@@ -52,10 +52,10 @@ New York: Springer-Verlag.
}
\examples{
-y = rdiric(n=1000, shape=c(3, 1, 4))
-fit = vglm(y ~ 1, dirichlet, trace = TRUE, crit="c")
+y <- rdiric(n = 1000, shape = c(3, 1, 4))
+fit <- vglm(y ~ 1, dirichlet, trace = TRUE, crit = "c")
Coef(fit)
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
}
\keyword{distribution}
diff --git a/man/recexp1.Rd b/man/recexp1.Rd
index 71a5f89..c2bc948 100644
--- a/man/recexp1.Rd
+++ b/man/recexp1.Rd
@@ -8,7 +8,7 @@
record values.
}
\usage{
-recexp1(lrate="loge", irate=NULL, imethod=1)
+recexp1(lrate = "loge", irate = NULL, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -58,13 +58,13 @@ recexp1(lrate="loge", irate=NULL, imethod=1)
\code{\link{exponential}}.
}
\examples{
-rawy = rexp(n <- 10000, rate=exp(1))
-y = unique(cummax(rawy)) # Keep only the records
+rawy <- rexp(n <- 10000, rate = exp(1))
+y <- unique(cummax(rawy)) # Keep only the records
length(y) / y[length(y)] # MLE of rate
-fit = vglm(y ~ 1, recexp1, trace=TRUE)
-coef(fit, matrix=TRUE)
+fit <- vglm(y ~ 1, recexp1, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
}
\keyword{models}
diff --git a/man/reciprocal.Rd b/man/reciprocal.Rd
index 93c9aac..9f925c7 100644
--- a/man/reciprocal.Rd
+++ b/man/reciprocal.Rd
@@ -6,12 +6,13 @@
\description{
Computes the reciprocal transformation, including its inverse and the
first two derivatives.
+
}
\usage{
-reciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
-nreciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+ reciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
+nreciprocal(theta, bvalue = NULL, inverse = FALSE, deriv = 0,
+ short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,44 +21,30 @@ nreciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are equal to 0 can be
- replaced by the \code{bvalue} component of the list \code{earg}
- before computing the link function value.
- The component name \code{bvalue} stands for ``boundary value''.
- See \code{\link{Links}} for general information about \code{earg}.
+ \item{bvalue}{
+ See \code{\link{Links}}.
- }
- \item{inverse.arg}{
- Logical. If \code{TRUE} the inverse function is computed
}
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
- }
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}.
}
+
}
\details{
The \code{reciprocal} link function is a special case of the power link
function. Numerical values of \code{theta} close to 0 result in
- \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}. The arguments
- \code{short} and \code{tag} are used only if \code{theta} is character.
+ \code{Inf}, \code{-Inf}, \code{NA} or \code{NaN}.
+
The \code{nreciprocal} link function computes the negative reciprocal,
i.e., \eqn{-1/ \theta}{-1/theta}.
+
+
}
\value{
For \code{reciprocal}:
@@ -70,10 +57,13 @@ nreciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
if \code{inverse = FALSE},
else if \code{inverse = TRUE} then it returns the reciprocal.
+
}
\references{
McCullagh, P. and Nelder, J. A. (1989)
\emph{Generalized Linear Models}, 2nd ed. London: Chapman & Hall.
+
+
}
%\section{Warning}{
%}
@@ -81,22 +71,25 @@ nreciprocal(theta, earg = list(), inverse.arg = FALSE, deriv = 0,
\note{ Numerical instability may occur when \code{theta} is
close to 0.
+
+
}
\seealso{
\code{\link{identity}},
\code{\link{powl}}.
+
}
\examples{
-reciprocal(1:5)
-reciprocal(1:5, inverse=TRUE, deriv=2)
+ reciprocal(1:5)
+ reciprocal(1:5, inverse = TRUE, deriv = 2)
nreciprocal(1:5)
-nreciprocal(1:5, inverse=TRUE, deriv=2)
+nreciprocal(1:5, inverse = TRUE, deriv = 2)
-x = (-3):3
-reciprocal(x) # Has Inf
-reciprocal(x, earg=list(bvalue= .Machine$double.eps)) # Has no Inf
+x <- (-3):3
+reciprocal(x) # Has Inf
+reciprocal(x, bvalue = .Machine$double.eps) # Has no Inf
}
\keyword{math}
\keyword{models}
diff --git a/man/recnormal1.Rd b/man/recnormal1.Rd
index 919e9a2..b38d578 100644
--- a/man/recnormal1.Rd
+++ b/man/recnormal1.Rd
@@ -85,13 +85,13 @@ recnormal1(lmean = "identity", lsd = "loge",
}
\examples{
-nn = 10000; mymean = 100
+nn <- 10000; mymean <- 100
# First value is reference value or trivial record
-Rdata = data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3))))
+Rdata <- data.frame(rawy = c(mymean, rnorm(nn, me = mymean, sd = exp(3))))
# Keep only observations that are records:
-rdata = data.frame(y = unique(cummax(with(Rdata, rawy))))
+rdata <- data.frame(y = unique(cummax(with(Rdata, rawy))))
-fit = vglm(y ~ 1, recnormal1, rdata, trace = TRUE, maxit = 200)
+fit <- vglm(y ~ 1, recnormal1, rdata, trace = TRUE, maxit = 200)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/rhobit.Rd b/man/rhobit.Rd
index 6c75e81..ea68df7 100644
--- a/man/rhobit.Rd
+++ b/man/rhobit.Rd
@@ -7,8 +7,8 @@
first two derivatives.
}
\usage{
-rhobit(theta, earg = list(), inverse = FALSE, deriv = 0,
- short = TRUE, tag = FALSE)
+rhobit(theta, bminvalue = NULL, bmaxvalue = NULL,
+ inverse = FALSE, deriv = 0, short = TRUE, tag = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -17,42 +17,34 @@ rhobit(theta, earg = list(), inverse = FALSE, deriv = 0,
See below for further details.
}
- \item{earg}{
- Optional list. Extra argument for passing in additional information.
- Values of \code{theta} which are less than or equal to -1 can be
- replaced by the \code{bminvalue} component of the list \code{earg}
+ \item{bminvalue, bmaxvalue}{
+ Optional boundary values, e.g.,
+ values of \code{theta} which are less than or equal to -1 can be
+ replaced by \code{bminvalue}
before computing the link function value.
- Values of \code{theta} which are greater than or equal to 1 can be
- replaced by the \code{bmaxvalue} component of the list \code{earg}
+ And values of \code{theta} which are greater than or equal to 1 can be
+ replaced by \code{bmaxvalue}
before computing the link function value.
- See \code{\link{Links}} for general information about \code{earg}.
+ See \code{\link{Links}}.
}
- \item{inverse}{
- Logical. If \code{TRUE} the inverse function is computed.
- }
- \item{deriv}{
- Order of the derivative. Integer with value 0, 1 or 2.
- }
- \item{short}{
- Used for labelling the \code{blurb} slot of a
- \code{\link{vglmff-class}} object.
+ \item{inverse, deriv, short, tag}{
+ Details at \code{\link{Links}}.
+
}
- \item{tag}{
- Used for labelling the linear/additive predictor in the
- \code{initialize} slot of a \code{\link{vglmff-class}} object.
- Contains a little more information if \code{TRUE}. }
+
}
\details{
The \code{rhobit} link function is commonly used for parameters that
lie between \eqn{-1} and \eqn{1}. Numerical values of \code{theta}
close to \eqn{-1} or \eqn{1} or out of range result in \code{Inf},
- \code{-Inf}, \code{NA} or \code{NaN}. The arguments \code{short}
- and \code{tag} are used only if \code{theta} is character.
+ \code{-Inf}, \code{NA} or \code{NaN}.
+
+
}
\value{
@@ -60,51 +52,59 @@ rhobit(theta, earg = list(), inverse = FALSE, deriv = 0,
theta)/(1 - theta))} when \code{inverse = FALSE}, and if \code{inverse =
TRUE} then \code{(exp(theta) - 1)/(exp(theta) + 1)}.
+
For \code{deriv = 1}, then the function returns \emph{d} \code{theta}
/ \emph{d} \code{eta} as a function of \code{theta} if \code{inverse =
FALSE}, else if \code{inverse = TRUE} then it returns the reciprocal.
+
}
\references{
Documentation accompanying the \pkg{VGAM} package at
\url{http://www.stat.auckland.ac.nz/~yee}
contains further information and examples.
+
}
\author{ Thomas W. Yee }
\note{
- Numerical instability may occur when \code{theta} is close to \eqn{-1} or \eqn{1}.
- One way of overcoming this is to use \code{earg}.
+ Numerical instability may occur when \code{theta} is close
+ to \eqn{-1} or \eqn{1}. One way of overcoming this is to
+ use \code{bminvalue}, etc.
+
The correlation parameter of a standard bivariate normal distribution
lies between \eqn{-1} and \eqn{1}, therefore this function can be used
for modelling this parameter as a function of explanatory variables.
+
The link function \code{rhobit} is very similar to
\code{\link{fisherz}}, e.g., just twice the value of
\code{\link{fisherz}}.
+
}
\seealso{
\code{\link{Links}},
\code{\link{binom2.rho}},
\code{\link{fisherz}}.
+
}
\examples{
-theta = seq(-0.99, 0.99, by=0.01)
-y = rhobit(theta)
+theta <- seq(-0.99, 0.99, by = 0.01)
+y <- rhobit(theta)
\dontrun{
-plot(theta, y, type="l", las=1, ylab="", main="rhobit(theta)")
-abline(v=0, h=0, lty=2)
+plot(theta, y, type = "l", las = 1, ylab = "", main = "rhobit(theta)")
+abline(v = 0, h = 0, lty = 2)
}
-x = c(seq(-1.02, -0.98, by=0.01), seq(0.97, 1.02, by=0.01))
-rhobit(x) # Has NAs
-rhobit(x, earg=list(bminvalue= -1 + .Machine$double.eps,
- bmaxvalue= 1 - .Machine$double.eps)) # Has no NAs
+x <- c(seq(-1.02, -0.98, by = 0.01), seq(0.97, 1.02, by = 0.01))
+rhobit(x) # Has NAs
+rhobit(x, bminvalue = -1 + .Machine$double.eps,
+ bmaxvalue = 1 - .Machine$double.eps) # Has no NAs
}
\keyword{math}
\keyword{models}
diff --git a/man/riceUC.Rd b/man/riceUC.Rd
index 7532659..0af13b6 100644
--- a/man/riceUC.Rd
+++ b/man/riceUC.Rd
@@ -13,7 +13,7 @@
}
\usage{
-drice(x, vee, sigma, log=FALSE)
+drice(x, vee, sigma, log = FALSE)
%price(q, vee, sigma)
%qrice(p, vee, sigma)
rrice(n, vee, sigma)
@@ -27,7 +27,7 @@ rrice(n, vee, sigma)
}
\item{log}{
Logical.
- If \code{log=TRUE} then the logarithm of the density is returned.
+ If \code{log = TRUE} then the logarithm of the density is returned.
}
@@ -55,13 +55,13 @@ rrice(n, vee, sigma)
}
\examples{
\dontrun{
-x = seq(0.01, 7, len=201)
-plot(x, drice(x, vee=0, sigma=1), type="n", las=1,, ylab="",
- main="Density of Rice distribution for various values of v")
-sigma = 1; vee = c(0,0.5,1,2,4)
-for(ii in 1:length(vee)) lines(x, drice(x, vee[ii], sigma), col=ii)
-legend(x=5, y=0.6, legend=as.character(vee),
- col=1:length(vee), lty=1)
+x <- seq(0.01, 7, len = 201)
+plot(x, drice(x, vee = 0, sigma = 1), type = "n", las = 1, ylab = "",
+ main = "Density of Rice distribution for various values of v")
+sigma <- 1; vee <- c(0,0.5,1,2,4)
+for(ii in 1:length(vee)) lines(x, drice(x, vee[ii], sigma), col = ii)
+legend(x = 5, y = 0.6, legend = as.character(vee),
+ col = 1:length(vee), lty = 1)
}
}
\keyword{distribution}
diff --git a/man/riceff.Rd b/man/riceff.Rd
index e37767c..822bb9d 100644
--- a/man/riceff.Rd
+++ b/man/riceff.Rd
@@ -8,18 +8,13 @@
}
\usage{
-riceff(lvee="loge", lsigma="loge", evee=list(), esigma=list(),
- ivee=NULL, isigma=NULL, nsimEIM=100, zero=NULL)
+riceff(lvee = "loge", lsigma = "loge",
+ ivee = NULL, isigma = NULL, nsimEIM = 100, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lvee, evee}{
- Link function and extra argument for the \eqn{v} parameter.
- See \code{\link{Links}} for more choices and for general information.
-
- }
- \item{lsigma, esigma}{
- Link function and extra argument for the \eqn{\sigma}{sigma} parameter.
+ \item{lvee, lsigma}{
+ Link functions for the \eqn{v} and \eqn{\sigma}{sigma} parameters.
See \code{\link{Links}} for more choices and for general information.
}
@@ -61,6 +56,7 @@ riceff(lvee="loge", lsigma="loge", evee=list(), esigma=list(),
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
@@ -75,9 +71,11 @@ Mathematical Analysis of Random Noise.
Convergence problems may occur for data where \eqn{v=0}; if so, use
\code{\link{rayleigh}} or possibly use an \code{\link{identity}} link.
+
When \eqn{v} is large (greater than 3, say) then the mean is approximately
\eqn{v} and the standard deviation is approximately \eqn{\sigma}{sigma}.
+
}
\seealso{
@@ -85,16 +83,18 @@ Mathematical Analysis of Random Noise.
\code{\link{rayleigh}},
\code{\link[base:Bessel]{besselI}}.
+
}
\examples{
-vee = exp(2); sigma = exp(1);
-y = rrice(n <- 1000, vee, sigma)
-fit = vglm(y ~ 1, riceff, trace=TRUE, crit="c")
+\dontrun{ vee <- exp(2); sigma <- exp(1);
+y <- rrice(n <- 1000, vee, sigma)
+fit <- vglm(y ~ 1, riceff, trace = TRUE, crit = "c")
c(mean(y), fitted(fit)[1])
-coef(fit, matrix=TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/rig.Rd b/man/rig.Rd
index bf008e7..542b66e 100644
--- a/man/rig.Rd
+++ b/man/rig.Rd
@@ -8,8 +8,7 @@
}
\usage{
-rig(lmu = "identity", llambda = "loge",
- emu=list(), elambda=list(), imu = NULL, ilambda = 1)
+rig(lmu = "identity", llambda = "loge", imu = NULL, ilambda = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,20 +22,17 @@ rig(lmu = "identity", llambda = "loge",
A \code{NULL} means a value is computed internally.
}
- \item{emu, elambda}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
}
\details{
See Jorgensen (1997) for details.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
Jorgensen, B. (1997)
@@ -54,9 +50,9 @@ London: Chapman & Hall
}
\examples{
-rdata = data.frame(y = rchisq(n=100, df=14)) # Not 'proper' data!!
-fit = vglm(y ~ 1, rig, rdata, trace=TRUE)
-fit = vglm(y ~ 1, rig, rdata, trace=TRUE, eps=1e-9, cri="c")
+rdata <- data.frame(y = rchisq(n = 100, df = 14)) # Not 'proper' data!!
+fit <- vglm(y ~ 1, rig, rdata, trace = TRUE)
+fit <- vglm(y ~ 1, rig, rdata, trace = TRUE, eps = 1e-9, crit = "coef")
summary(fit)
}
\keyword{models}
diff --git a/man/rlplot.egev.Rd b/man/rlplot.egev.Rd
index fc3e680..6bc976b 100644
--- a/man/rlplot.egev.Rd
+++ b/man/rlplot.egev.Rd
@@ -135,12 +135,12 @@ London: Springer-Verlag.
}
\examples{
-gdata = data.frame(y = rgev(n <- 100, scale = 2, shape = -0.1))
-fit = vglm(y ~ 1, egev, gdata, trace = TRUE)
+gdata <- data.frame(y = rgev(n <- 100, scale = 2, shape = -0.1))
+fit <- vglm(y ~ 1, egev, gdata, trace = TRUE)
# Identity link for all parameters:
-fit2 = vglm(y ~ 1, egev(lshape = identity, lscale = identity,
- iscale = 10), gdata, trace = TRUE)
+fit2 <- vglm(y ~ 1, egev(lshape = identity, lscale = identity,
+ iscale = 10), gdata, trace = TRUE)
coef(fit2, matrix = TRUE)
\dontrun{
par(mfrow = c(1, 2))
diff --git a/man/rrar.Rd b/man/rrar.Rd
index 03c1753..a37b7e6 100644
--- a/man/rrar.Rd
+++ b/man/rrar.Rd
@@ -87,16 +87,16 @@ time series.
}
\examples{
-year = seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
+year <- seq(1961 + 1/12, 1972 + 10/12, by = 1/12)
\dontrun{ par(mar = c(4, 4, 2, 2) + 0.1, mfrow = c(2, 2))
for(ii in 1:4) {
- plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
- type = "l", xlab = "", ylab = "", col = "blue")
- points(year, grain.us[,ii], pch = "*", col = "blue")
+ plot(year, grain.us[, ii], main = names(grain.us)[ii], las = 1,
+ type = "l", xlab = "", ylab = "", col = "blue")
+ points(year, grain.us[,ii], pch = "*", col = "blue")
}}
apply(grain.us, 2, mean) # mu vector
-cgrain = scale(grain.us, scale = FALSE) # Center the time series only
-fit = vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
+cgrain <- scale(grain.us, scale = FALSE) # Center the time series only
+fit <- vglm(cgrain ~ 1, rrar(Ranks = c(4, 1)), trace = TRUE)
summary(fit)
print(fit at misc$Ak1, dig = 2)
diff --git a/man/rrvglm.Rd b/man/rrvglm.Rd
index 9efd118..1105e5b 100644
--- a/man/rrvglm.Rd
+++ b/man/rrvglm.Rd
@@ -256,32 +256,32 @@ se.a21.hat <- sqrt(vcov(rrnb2)["I(lv.mat)", "I(lv.mat)"])
ci.a21 <- a21.hat + c(-1, 1) * 1.96 * se.a21.hat
(ci.delta2 <- 2 - rev(ci.a21)) # The 95 percent confidence interval
-confint_rrnb(rrnb2) # Quick way to get it
+confint_rrnb(rrnb2) # Quick way to get it
# Plot the abundances and fitted values against the latent variable
\dontrun{
plot(y2 ~ lv(rrnb2), data = mydata, col = "blue",
- xlab = "Latent variable", las = 1)
+ xlab = "Latent variable", las = 1)
ooo <- order(lv(rrnb2))
lines(fitted(rrnb2)[ooo] ~ lv(rrnb2)[ooo], col = "red") }
# Example 2: stereotype model (reduced-rank multinomial logit model)
data(car.all)
-index = with(car.all, Country == "Germany" | Country == "USA" |
- Country == "Japan" | Country == "Korea")
-scar = car.all[index, ] # standardized car data
-fcols = c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
+index <- with(car.all, Country == "Germany" | Country == "USA" |
+ Country == "Japan" | Country == "Korea")
+scar <- car.all[index, ] # standardized car data
+fcols <- c(13,14,18:20,22:26,29:31,33,34,36) # These are factors
scar[,-fcols] = scale(scar[, -fcols]) # Standardize all numerical vars
-ones = matrix(1, 3, 1)
-clist = list("(Intercept)" = diag(3), Width = ones, Weight = ones,
- Disp. = diag(3), Tank = diag(3), Price = diag(3),
- Frt.Leg.Room = diag(3))
+ones <- matrix(1, 3, 1)
+clist <- list("(Intercept)" = diag(3), Width = ones, Weight = ones,
+ Disp. = diag(3), Tank = diag(3), Price = diag(3),
+ Frt.Leg.Room = diag(3))
set.seed(111)
-fit = rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
- multinomial, data = scar, Rank = 2, trace = TRUE,
- constraints = clist, Norrr = ~ 1 + Width + Weight,
- Uncor = TRUE, Corner = FALSE, Bestof = 2)
-fit at misc$deviance # A history of the fits
+fit <- rrvglm(Country ~ Width + Weight + Disp. + Tank + Price + Frt.Leg.Room,
+ multinomial, data = scar, Rank = 2, trace = TRUE,
+ constraints = clist, Norrr = ~ 1 + Width + Weight,
+ Uncor = TRUE, Corner = FALSE, Bestof = 2)
+fit at misc$deviance # A history of the fits
Coef(fit)
\dontrun{ biplot(fit, chull = TRUE, scores = TRUE, clty = 2, Ccex = 2,
ccol = "blue", scol = "red", Ccol = "darkgreen", Clwd = 2,
diff --git a/man/rrvglm.control.Rd b/man/rrvglm.control.Rd
index 813020b..d05e2d7 100644
--- a/man/rrvglm.control.Rd
+++ b/man/rrvglm.control.Rd
@@ -5,6 +5,7 @@
\description{
Algorithmic constants and parameters for running \code{rrvglm} are set
using this function.
+
}
\usage{
rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
@@ -27,11 +28,13 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
The variables making up \bold{x1} are given by the terms in
\code{Norrr} argument, and the rest of the terms comprise \bold{x2}.
+
}
\item{Algorithm}{
Character string indicating what algorithm is
to be used. The default is the first one.
+
}
\item{Corner}{
Logical indicating whether corner constraints are
@@ -40,6 +43,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
of the constraint matrices that are use as the corner constraints,
i.e., they hold an order-\eqn{R} identity matrix.
+
}
\item{Uncorrelated.lv}{
Logical indicating whether uncorrelated latent variables are to be used.
@@ -48,6 +52,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
variance and uncorrelated. This constraint does not lead to a unique
solution because it can be rotated.
+
}
\item{Wmat}{ Yet to be done. }
\item{Svd.arg}{
@@ -55,12 +60,14 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
of the outer product is to computed. This is another normalization
which ensures uniqueness. See the argument \code{Alpha} below.
+
}
\item{Index.corner}{
Specifies the \eqn{R} rows of the constraint matrices that are
used for the corner constraints, i.e., they hold an order-\eqn{R}
identity matrix.
+
}
\item{Alpha}{
The exponent in the singular value decomposition that is used in
@@ -73,6 +80,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
A value of 0.5 is `symmetrical'.
This argument is used only when \code{Svd.arg=TRUE}.
+
}
\item{Bestof}{
Integer. The best of \code{Bestof} models fitted is
@@ -82,11 +90,13 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
for \bold{C},
i.e., when \bold{C} is \emph{not} passed in as initial values.
+
}
\item{Ainit, Cinit}{
Initial \bold{A} and \bold{C} matrices which may speed up convergence.
They must be of the correct dimension.
+
}
\item{Etamat.colmax}{
Positive integer, no smaller than \code{Rank}. Controls the amount
@@ -95,6 +105,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
In general, the larger the value, the better the initial value.
Used only if \code{Use.Init.Poisson.QO=TRUE}.
+
}
% \item{Quadratic}{
@@ -103,9 +114,13 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
% \code{"qrrvglm"} will be returned, otherwise \code{"rrvglm"}.
% }
\item{szero}{
- Integer vector specifying which rows
- of the constraint matrices are to be all zeros.
- These are called structural zeros.
+ Integer vector specifying which rows
+ of the estimated constraint matrices (\bold{A}) are
+ to be all zeros.
+ These are called \emph{structural zeros}.
+ Must not have any common value with \code{Index.corner}, and
+ be a subset of the vector \code{1:M}.
+
}
\item{SD.Ainit, SD.Cinit}{
@@ -114,6 +129,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
These are normally distributed with mean zero.
This argument is used only if \code{Use.Init.Poisson.QO = FALSE}.
+
}
% \item{ppar}{ Ignore this. }
\item{Norrr}{
@@ -228,7 +244,7 @@ rrvglm.control(Rank = 1, Algorithm = c("alternating", "derivative"),
\examples{
set.seed(111)
pneumo <- transform(pneumo, let = log(exposure.time),
- x3 = runif(nrow(pneumo))) # x3 is random noise
+ x3 = runif(nrow(pneumo))) # x3 is random noise
fit <- rrvglm(cbind(normal, mild, severe) ~ let + x3,
multinomial, pneumo, Rank = 1, Index.corner = 2)
constraints(fit)
diff --git a/man/rrvglm.optim.control.Rd b/man/rrvglm.optim.control.Rd
index 33380c6..0b629d8 100644
--- a/man/rrvglm.optim.control.Rd
+++ b/man/rrvglm.optim.control.Rd
@@ -43,10 +43,13 @@ it to a low value will obtain the global solution. It appears that,
if BFGS kicks in too late when the Nelder-Mead algorithm is starting to
converge to a local solution, then switching to BFGS will not be sufficient
to bypass convergence to that local solution.
+
}
-\seealso{ \code{\link{rrvglm.control}},
-\code{\link[stats]{optim}}.
+\seealso{
+ \code{\link{rrvglm.control}},
+ \code{\link[stats]{optim}}.
+
}
%\examples{
%}
diff --git a/man/ruge.Rd b/man/ruge.Rd
index fc99396..8585e9c 100644
--- a/man/ruge.Rd
+++ b/man/ruge.Rd
@@ -21,6 +21,7 @@
of zero counts.
The counts can be thought of as being approximately Poisson distributed.
+
}
\source{
Rutherford, E. and Geiger, H. (1910)
@@ -28,13 +29,14 @@
\emph{Philosophical Magazine},
\bold{20}, 698--704.
+
}
%\references{
%}
\examples{
-lambdahat = with(ruge, weighted.mean(number, w=counts))
-(N = with(ruge, sum(counts)))
+lambdahat <- with(ruge, weighted.mean(number, w = counts))
+(N <- with(ruge, sum(counts)))
with(ruge, cbind(number, counts,
- fitted=round(N * dpois(number, lam=lambdahat))))
+ fitted = round(N * dpois(number, lam = lambdahat))))
}
\keyword{datasets}
diff --git a/man/s.Rd b/man/s.Rd
index 1180f18..cc1cf4f 100644
--- a/man/s.Rd
+++ b/man/s.Rd
@@ -106,21 +106,21 @@ Vector generalized additive models.
\examples{
# Nonparametric logistic regression
-fit = vgam(agaaus ~ s(altitude, df=2), binomialff, hunua)
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
\dontrun{
-plot(fit, se=TRUE)}
+plot(fit, se = TRUE)}
# Bivariate logistic model with artificial data
-nn = 300
-mydf = data.frame(x1=runif(nn), x2=runif(nn))
-mydf = transform(mydf,
- y1 = rbinom(nn, size=1, prob=logit(sin(2*x2), inv=TRUE)),
- y2 = rbinom(nn, size=1, prob=logit(sin(2*x2), inv=TRUE)))
-fit = vgam(cbind(y1,y2) ~ x1 + s(x2, 3), trace=TRUE,
- binom2.or(exchangeable = TRUE ~ s(x2,3)), data=mydf)
-coef(fit, matrix=TRUE) # Hard to interpret
+nn <- 300
+mydf <- data.frame(x1 = runif(nn), x2 = runif(nn))
+mydf <- transform(mydf,
+ y1 = rbinom(nn, size = 1, prob = logit(sin(2*x2), inv = TRUE)),
+ y2 = rbinom(nn, size = 1, prob = logit(sin(2*x2), inv = TRUE)))
+fit <- vgam(cbind(y1, y2) ~ x1 + s(x2, 3), trace=TRUE,
+ binom2.or(exchangeable = TRUE ~ s(x2, 3)), data = mydf)
+coef(fit, matrix = TRUE) # Hard to interpret
\dontrun{
-plot(fit, se=TRUE, which.term= 2, scol="blue")}
+plot(fit, se = TRUE, which.term = 2, scol = "blue")}
}
\keyword{models}
\keyword{regression}
diff --git a/man/seq2binomial.Rd b/man/seq2binomial.Rd
index 17556e1..3dc887a 100644
--- a/man/seq2binomial.Rd
+++ b/man/seq2binomial.Rd
@@ -8,8 +8,8 @@
}
\usage{
-seq2binomial(lprob1 = "logit", lprob2 = "logit", eprob1 = list(),
- eprob2 = list(), iprob1 = NULL, iprob2 = NULL, zero = NULL)
+seq2binomial(lprob1 = "logit", lprob2 = "logit",
+ iprob1 = NULL, iprob2 = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +19,6 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit", eprob1 = list(),
See \code{\link{Links}} for more choices.
}
- \item{eprob1, eprob2}{
- Lists. Extra arguments for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iprob1, iprob2}{
Optional initial value for the first and second probabilities respectively.
A \code{NULL} means a value is obtained in the \code{initialize} slot.
@@ -91,23 +86,25 @@ seq2binomial(lprob1 = "logit", lprob2 = "logit", eprob1 = list(),
for \eqn{y_1}{y1}, e.g., if \code{mvector} below has some values
which are zero.
+
}
\seealso{
\code{\link{binomialff}}.
+
}
\examples{
-sdata = data.frame(mvector = round(rnorm(nn <- 100, m = 10, sd = 2)),
- x = runif(nn))
-sdata = transform(sdata, prob1 = logit(+2 - x, inverse = TRUE),
- prob2 = logit(-2 + x, inverse = TRUE))
-sdata = transform(sdata, successes1 = rbinom(nn, size=mvector, prob=prob1))
-sdata = transform(sdata, successes2 = rbinom(nn, size=successes1, prob=prob2))
-sdata = transform(sdata, y1 = successes1 / mvector)
-sdata = transform(sdata, y2 = successes2 / successes1)
-fit = vglm(cbind(y1,y2) ~ x, seq2binomial, weight=mvector,
- data = sdata, trace=TRUE)
+sdata <- data.frame(mvector = round(rnorm(nn <- 100, m = 10, sd = 2)),
+ x2 = runif(nn))
+sdata <- transform(sdata, prob1 = logit(+2 - x2, inverse = TRUE),
+ prob2 = logit(-2 + x2, inverse = TRUE))
+sdata <- transform(sdata, successes1 = rbinom(nn, size = mvector, prob = prob1))
+sdata <- transform(sdata, successes2 = rbinom(nn, size = successes1, prob = prob2))
+sdata <- transform(sdata, y1 = successes1 / mvector)
+sdata <- transform(sdata, y2 = successes2 / successes1)
+fit <- vglm(cbind(y1, y2) ~ x2, seq2binomial, weight = mvector,
+ data = sdata, trace = TRUE)
coef(fit)
coef(fit, matrix = TRUE)
head(fitted(fit))
diff --git a/man/simplex.Rd b/man/simplex.Rd
index f5f37c4..8f9df48 100644
--- a/man/simplex.Rd
+++ b/man/simplex.Rd
@@ -9,7 +9,7 @@
}
\usage{
-simplex(lmu = "logit", lsigma = "loge", emu=list(), esigma=list(),
+simplex(lmu = "logit", lsigma = "loge",
imu = NULL, isigma = NULL,
imethod = 1, shrinkage.init = 0.95, zero = 2)
@@ -21,11 +21,6 @@ simplex(lmu = "logit", lsigma = "loge", emu=list(), esigma=list(),
See \code{\link{Links}} for more choices.
}
- \item{emu, esigma}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{imu, isigma}{
Optional initial values for \code{mu} and \code{sigma}.
A \code{NULL} means a value is obtained internally.
@@ -50,6 +45,7 @@ simplex(lmu = "logit", lsigma = "loge", emu=list(), esigma=list(),
returned as the fitted values).
+
% This comes from Jorgensen but it is not confirmed by simulations:
% The variance of \eqn{Y} is \eqn{\mu (1 - \mu) - \sqrt{ \lambda / 2}
% \exp\{ \lambda / (\mu^2 (1 - \mu)^2) \}
diff --git a/man/simplexUC.Rd b/man/simplexUC.Rd
index 6a5196d..a517042 100644
--- a/man/simplexUC.Rd
+++ b/man/simplexUC.Rd
@@ -34,6 +34,7 @@ rsimplex(n, mu = 0.5, dispersion = 1)
it may be very slow if the density is highly peaked,
and will fail if the density asymptotes at the boundary.
+
}
\value{
\code{dsimplex(x)} gives the density function,
@@ -48,17 +49,18 @@ rsimplex(n, mu = 0.5, dispersion = 1)
\seealso{
\code{\link{simplex}}.
+
}
\examples{
-sigma = c(4, 2, 1) # Dispersion parameter
-mymu = c(.1, .5, .7); xxx = seq(0, 1, len = 501)
-\dontrun{ par(mfrow=c(3,3)) # Figure 2.1 of Song (2007)
+sigma <- c(4, 2, 1) # Dispersion parameter
+mymu <- c(0.1, 0.5, 0.7); xxx <- seq(0, 1, len = 501)
+\dontrun{ par(mfrow = c(3, 3)) # Figure 2.1 of Song (2007)
for(iii in 1:3)
- for(jjj in 1:3) {
- plot(xxx, dsimplex(xxx, mymu[jjj], sigma[iii]),
- type = "l", col = "blue", xlab = "", ylab = "", main =
- paste("mu = ", mymu[jjj], ", sigma = ", sigma[iii], sep = "")) } }
+ for(jjj in 1:3) {
+ plot(xxx, dsimplex(xxx, mymu[jjj], sigma[iii]),
+ type = "l", col = "blue", xlab = "", ylab = "", main =
+ paste("mu = ", mymu[jjj], ", sigma = ", sigma[iii], sep = "")) } }
}
\keyword{distribution}
diff --git a/man/sinmad.Rd b/man/sinmad.Rd
index 94d8e1c..498b58a 100644
--- a/man/sinmad.Rd
+++ b/man/sinmad.Rd
@@ -8,7 +8,6 @@
}
\usage{
sinmad(lshape1.a = "loge", lscale = "loge", lshape3.q = "loge",
- eshape1.a = list(), escale = list(), eshape3.q = list(),
ishape1.a = NULL, iscale = NULL, ishape3.q = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -19,11 +18,6 @@ sinmad(lshape1.a = "loge", lscale = "loge", lshape3.q = "loge",
See \code{\link{Links}} for more choices.
}
- \item{eshape1.a, escale, eshape3.q}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ishape1.a, iscale, ishape3.q}{
Optional initial values for \code{a}, \code{scale}, and \code{q}.
@@ -100,12 +94,13 @@ Hoboken, NJ, USA: Wiley-Interscience.
\code{\link{paralogistic}},
\code{\link{invparalogistic}}.
+
}
\examples{
-sdata = data.frame(y = rsinmad(n = 1000, exp(1), exp(2), exp(0)))
-fit = vglm(y ~ 1, sinmad, sdata, trace = TRUE)
-fit = vglm(y ~ 1, sinmad(ishape1.a = exp(1)), sdata, trace = TRUE)
+sdata <- data.frame(y = rsinmad(n = 1000, exp(1), exp(2), exp(0)))
+fit <- vglm(y ~ 1, sinmad, sdata, trace = TRUE)
+fit <- vglm(y ~ 1, sinmad(ishape1.a = exp(1)), sdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
diff --git a/man/sinmadUC.Rd b/man/sinmadUC.Rd
index 42c250d..bbcc3fb 100644
--- a/man/sinmadUC.Rd
+++ b/man/sinmadUC.Rd
@@ -45,6 +45,7 @@ Kleiber, C. and Kotz, S. (2003)
Actuarial Sciences},
Hoboken, NJ: Wiley-Interscience.
+
}
\author{ T. W. Yee }
\details{
@@ -63,10 +64,11 @@ Hoboken, NJ: Wiley-Interscience.
\code{\link{sinmad}},
\code{\link{genbetaII}}.
+
}
\examples{
-sdata = data.frame(y = rsinmad(n = 3000, 4, 6, 2))
-fit = vglm(y ~ 1, sinmad(ishape1.a = 2.1), sdata, trace = TRUE, crit = "coef")
+sdata <- data.frame(y = rsinmad(n = 3000, 4, 6, 2))
+fit <- vglm(y ~ 1, sinmad(ishape1.a = 2.1), sdata, trace = TRUE, crit = "coef")
coef(fit, matrix = TRUE)
Coef(fit)
}
diff --git a/man/skellam.Rd b/man/skellam.Rd
index 9119904..7e37b2d 100644
--- a/man/skellam.Rd
+++ b/man/skellam.Rd
@@ -8,18 +8,13 @@
}
\usage{
-skellam(lmu1="loge", lmu2="loge", emu1=list(), emu2=list(),
- imu1=NULL, imu2=NULL, nsimEIM=100, parallel=FALSE, zero=NULL)
+skellam(lmu1 = "loge", lmu2 = "loge", imu1 = NULL, imu2 = NULL,
+ nsimEIM = 100, parallel = FALSE, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lmu1, emu1}{
- Link function and extra argument for the \eqn{\mu1}{mu1} parameter.
- See \code{\link{Links}} for more choices and for general information.
-
- }
- \item{lmu2, emu2}{
- Link function and extra argument for the \eqn{\mu1}{mu1} parameter.
+ \item{lmu1, lmu2}{
+ Link functions for the \eqn{\mu_1}{mu1} and \eqn{\mu_2}{mu2} parameters.
See \code{\link{Links}} for more choices and for general information.
}
@@ -55,22 +50,26 @@ f(y;mu1,mu2) =
Here, \eqn{I_v} is the modified Bessel function of the
first kind with order \eqn{v}.
+
The mean is \eqn{\mu_1 - \mu_2}{mu1 - mu2} (returned as the fitted values)
and the variance is \eqn{\mu_1 + \mu_2}{mu1 + mu2}.
Simulated Fisher scoring is implemented.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\section{Warning }{
This \pkg{VGAM} family function seems fragile and very sensitive to
the initial values.
Use very cautiously!!
+
}
\references{
@@ -80,12 +79,14 @@ two Poisson variates belonging to different populations.
\emph{Journal of the Royal Statistical Society, Series A},
\bold{109}, 296.
+
}
%\author{ T. W. Yee }
\note{
Numerical problems may occur for data if \eqn{\mu_1}{mu1} and/or
\eqn{\mu_2}{mu2} are large.
+
}
\seealso{
@@ -93,19 +94,22 @@ two Poisson variates belonging to different populations.
\code{\link[stats:Poisson]{dpois}},
\code{\link{poissonff}}.
+
}
\examples{
-sdata = data.frame(x = runif(nn <- 1000))
-sdata = transform(sdata, mu1 = exp(1+x), mu2 = exp(1+x))
-sdata = transform(sdata, y = rskellam(nn, mu1, mu2))
-fit1 = vglm(y ~ x, skellam, sdata, trace=TRUE)
-fit2 = vglm(y ~ x, skellam(parallel=TRUE), sdata, trace=TRUE, crit="c")
-coef(fit1, matrix=TRUE)
-coef(fit2, matrix=TRUE)
+\dontrun{
+sdata <- data.frame(x2 = runif(nn <- 1000))
+sdata <- transform(sdata, mu1 = exp(1+x2), mu2 = exp(1+x2))
+sdata <- transform(sdata, y = rskellam(nn, mu1, mu2))
+fit1 <- vglm(y ~ x2, skellam, sdata, trace = TRUE, crit = "c")
+fit2 <- vglm(y ~ x2, skellam(parallel = TRUE), sdata, trace = TRUE)
+coef(fit1, matrix = TRUE)
+coef(fit2, matrix = TRUE)
summary(fit1)
# Likelihood ratio test for equal means:
-pchisq(2*(logLik(fit1)-logLik(fit2)),
- df=fit2 at df.residual-fit1 at df.residual, lower.tail=FALSE)
+pchisq(2 * (logLik(fit1) - logLik(fit2)),
+ df = fit2 at df.residual - fit1 at df.residual, lower.tail = FALSE)
+}
}
\keyword{models}
\keyword{regression}
diff --git a/man/skellamUC.Rd b/man/skellamUC.Rd
index cf33e74..b5e73db 100644
--- a/man/skellamUC.Rd
+++ b/man/skellamUC.Rd
@@ -13,7 +13,7 @@
}
\usage{
-dskellam(x, mu1, mu2, log=FALSE)
+dskellam(x, mu1, mu2, log = FALSE)
%pskellam(q, mu1, mu2)
%qskellam(p, mu1, mu2)
rskellam(n, mu1, mu2)
@@ -39,26 +39,26 @@ rskellam(n, mu1, mu2)
for estimating the parameters,
for the formula of the probability density function and other details.
+
}
\section{Warning }{
Numerical problems may occur for data if \eqn{\mu_1}{mu1} and/or
\eqn{\mu_2}{mu2} are large.
The normal approximation for this case has not been implemented yet.
+
}
\seealso{
\code{\link{skellam}},
\code{\link[stats:Poisson]{dpois}}.
+
}
\examples{
-\dontrun{
-mu1 = 1; mu2 = 2
-x = (-7):7
-plot(x, dskellam(x, mu1, mu2), type="h", las=1, col="blue",
- main=paste("Density of Skellam distribution with mu1=", mu1,
- " and mu2=", mu2, sep=""))
-}
+\dontrun{ mu1 <- 1; mu2 <- 2; x <- (-7):7
+plot(x, dskellam(x, mu1, mu2), type = "h", las = 1, col = "blue",
+ main = paste("Density of Skellam distribution with mu1 = ", mu1,
+ " and mu2 = ", mu2, sep = "")) }
}
\keyword{distribution}
diff --git a/man/skewnormal1.Rd b/man/skewnormal1.Rd
index 603acf6..de92d9a 100644
--- a/man/skewnormal1.Rd
+++ b/man/skewnormal1.Rd
@@ -8,12 +8,11 @@
}
\usage{
-skewnormal1(lshape = "identity", earg = list(), ishape = NULL,
- nsimEIM = NULL)
+skewnormal1(lshape = "identity", ishape = NULL, nsimEIM = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lshape, earg, ishape, nsimEIM}{
+ \item{lshape, ishape, nsimEIM}{
See \code{\link{Links}} and
\code{\link{CommonVGAMffArguments}}.
@@ -52,6 +51,7 @@ skewnormal1(lshape = "identity", earg = list(), ishape = NULL,
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
\references{
@@ -82,9 +82,9 @@ distribution.
}
\section{Warning }{
- It is well known that the EIM of Azzalini's skew-normal distribution
- is singular for skewness parameter tending to zero, and thus produces
- influential problems.
+ It is well known that the EIM of Azzalini's skew-normal
+ distribution is singular for skewness parameter tending to zero,
+ and thus produces influential problems.
}
@@ -93,6 +93,7 @@ distribution.
\code{\link{normal1}},
\code{\link{fnormal1}}.
+
}
\examples{
@@ -105,9 +106,9 @@ with(sdata, mean(y))
x <- with(sdata, seq(min(y), max(y), len = 200))
with(sdata, lines(x, dsnorm(x, shape = Coef(fit)), col = "blue")) }
-sdata <- data.frame(x = runif(nn))
-sdata <- transform(sdata, y = rsnorm(nn, shape = 1 + 2*x))
-fit <- vglm(y ~ x, skewnormal1, sdata, trace = TRUE, crit = "coef")
+sdata <- data.frame(x2 = runif(nn))
+sdata <- transform(sdata, y = rsnorm(nn, shape = 1 + 2*x2))
+fit <- vglm(y ~ x2, skewnormal1, sdata, trace = TRUE, crit = "coef")
summary(fit)
}
\keyword{models}
diff --git a/man/slash.Rd b/man/slash.Rd
index 829a50f..dbbee72 100644
--- a/man/slash.Rd
+++ b/man/slash.Rd
@@ -7,9 +7,9 @@
slash distribution by maximum likelihood estimation.
}
\usage{
-slash (lmu="identity", lsigma="loge", emu=list(), esigma=list(),
- imu=NULL, isigma=NULL, iprobs = c(0.1, 0.9), nsimEIM=250,
- zero=NULL, smallno = .Machine$double.eps*1000)
+slash(lmu = "identity", lsigma = "loge",
+ imu = NULL, isigma = NULL, iprobs = c(0.1, 0.9), nsimEIM = 250,
+ zero = NULL, smallno = .Machine$double.eps*1000)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -19,11 +19,13 @@ slash (lmu="identity", lsigma="loge", emu=list(), esigma=list(),
See \code{\link{Links}} for more choices.
}
- \item{emu, esigma}{
- List. Extra argument for each of the link functions.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{emu, esigma}{
+% List. Extra argument for each of the link functions.
+% See \code{earg} in \code{\link{Links}} for general information.
+%emu = list(), esigma = list(),
+% }
+
\item{imu, isigma}{
Initial values.
A \code{NULL} means an initial value is chosen internally.
@@ -81,11 +83,13 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
\emph{Continuous Univariate Distributions},
2nd edition, Volume 1, New York: Wiley.
+
Kafadar, K. (1982)
A Biweight Approach to the One-Sample Problem
\emph{Journal of the American Statistical Association},
\bold{77}, 416--424.
-
+
+
}
\author{ T. W. Yee and C. S. Chee }
@@ -94,20 +98,24 @@ f(y) = 1/(2*sigma*sqrt(2*pi)) if y=mu
Convergence is often quite slow.
Numerical problems may occur.
+
}
\seealso{
\code{\link{rslash}}.
+
}
\examples{
-sdata = data.frame(y = rslash(n=1000, mu=4, sigma=exp(2)))
-fit = vglm(y ~ 1, slash, sdata, trace=TRUE)
-coef(fit, matrix=TRUE)
+\dontrun{
+sdata <- data.frame(y = rslash(n = 1000, mu = 4, sigma = exp(2)))
+fit <- vglm(y ~ 1, slash, sdata, trace = TRUE)
+coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/slashUC.Rd b/man/slashUC.Rd
index e8c971c..49cb586 100644
--- a/man/slashUC.Rd
+++ b/man/slashUC.Rd
@@ -11,9 +11,9 @@
}
\usage{
-dslash(x, mu=0, sigma=1, log=FALSE, smallno=.Machine$double.eps*1000)
-pslash(q, mu=0, sigma=1)
-rslash(n, mu=0, sigma=1)
+dslash(x, mu = 0, sigma = 1, log = FALSE, smallno = .Machine$double.eps*1000)
+pslash(q, mu = 0, sigma = 1)
+rslash(n, mu = 0, sigma = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -58,14 +58,14 @@ rslash(n, mu=0, sigma=1)
}
\examples{
\dontrun{
-curve(dslash, col="blue", ylab="f(x)", -5, 5, ylim=c(0,0.4),
- main="Standard slash, normal and Cauchy densities", lwd=2)
-curve(dnorm, col="black", lty=2, lwd=2, add=TRUE)
-curve(dcauchy, col="red", lty=3, lwd=2, add=TRUE)
-legend(x=2, y=0.3, c("slash","normal","Cauchy"), lty=1:3,
- col=c("blue","black","red"), lwd=2)
+curve(dslash, col = "blue", ylab = "f(x)", -5, 5, ylim = c(0, 0.4),
+ main = "Standard slash, normal and Cauchy densities", lwd = 2)
+curve(dnorm, col = "black", lty = 2, lwd = 2, add = TRUE)
+curve(dcauchy, col = "orange", lty = 3, lwd = 2, add = TRUE)
+legend(x = 2, y = 0.3, c("slash", "normal", "Cauchy"), lty = 1:3,
+ col = c("blue","black","orange"), lwd = 2)
-curve(pslash, col="blue", -5, 5, ylim=0:1)
+curve(pslash, col = "blue", -5, 5, ylim = 0:1)
}
}
\keyword{distribution}
diff --git a/man/snormUC.Rd b/man/snormUC.Rd
index b2f1182..1be799d 100644
--- a/man/snormUC.Rd
+++ b/man/snormUC.Rd
@@ -78,14 +78,15 @@ rsnorm(n, location = 0, scale = 1, shape = 0)
\seealso{
\code{\link{skewnormal1}}.
+
}
\examples{
-\dontrun{ N <- 200 # grid resolution
+\dontrun{ N <- 200 # grid resolution
shape <- 7
x <- seq(-4, 4, len = N)
plot(x, dsnorm(x, shape = shape), type = "l", col = "blue", las = 1,
ylab = "", lty = 1, lwd = 2)
-abline(v = 0, h = 0)
+abline(v = 0, h = 0, col = "grey")
lines(x, dnorm(x), col = "orange", lty = 2, lwd = 2)
legend("topleft", leg = c(paste("Blue = dsnorm(x, ", shape,")", sep = ""),
"Orange = standard normal density"), lty = 1:2, lwd = 2,
diff --git a/man/sratio.Rd b/man/sratio.Rd
index cfb3a87..8e7c378 100644
--- a/man/sratio.Rd
+++ b/man/sratio.Rd
@@ -7,9 +7,8 @@
regression model to an ordered (preferably) factor response.
}
\usage{
-sratio(link = "logit", earg = list(),
- parallel = FALSE, reverse = FALSE, zero = NULL,
- whitespace = FALSE)
+sratio(link = "logit", parallel = FALSE, reverse = FALSE,
+ zero = NULL, whitespace = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -20,11 +19,6 @@ sratio(link = "logit", earg = list(),
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link function.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{parallel}{
A logical, or formula specifying which terms have
equal/unequal coefficients.
@@ -122,7 +116,8 @@ contains further information and examples.
}
\section{Warning }{
- No check is made to verify that the response is ordinal;
+ No check is made to verify that the response is ordinal if the
+ response is a matrix;
see \code{\link[base:factor]{ordered}}.
@@ -143,8 +138,8 @@ contains further information and examples.
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let,
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let,
sratio(parallel = TRUE), data = pneumo))
coef(fit, matrix = TRUE)
constraints(fit)
diff --git a/man/studentt.Rd b/man/studentt.Rd
index dbf4c50..234fce5 100644
--- a/man/studentt.Rd
+++ b/man/studentt.Rd
@@ -9,16 +9,10 @@
}
\usage{
-studentt(ldf = "loglog", edf = list(), idf = NULL, tol1 = 0.1,
- imethod = 1)
-studentt2(df = Inf,
- llocation = "identity", elocation = list(),
- lscale = "loge", escale = list(),
- ilocation = NULL, iscale = NULL,
- imethod = 1, zero = -2)
-studentt3(llocation = "identity", elocation = list(),
- lscale = "loge", escale = list(),
- ldf = "loglog", edf = list(),
+studentt(ldf = "loglog", idf = NULL, tol1 = 0.1, imethod = 1)
+studentt2(df = Inf, llocation = "identity", lscale = "loge",
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = -2)
+studentt3(llocation = "identity", lscale = "loge", ldf = "loglog",
ilocation = NULL, iscale = NULL, idf = NULL,
imethod = 1, zero = -(2:3))
}
@@ -33,11 +27,6 @@ studentt3(llocation = "identity", elocation = list(),
than unity; see below.
}
- \item{elocation, escale, edf}{
- List. Extra arguments for the links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation, iscale, idf}{
Optional initial values.
If given, the values must be in range.
@@ -102,6 +91,7 @@ studentt3(llocation = "identity", elocation = list(),
In general convergence can be slow, especially when there are
covariates.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -154,6 +144,7 @@ application to financial econometrics.
\code{\link{koenker}},
\code{\link[stats]{TDist}}.
+
}
\examples{
tdata <- data.frame(x2 = runif(nn <- 1000))
diff --git a/man/tikuv.Rd b/man/tikuv.Rd
index 1077ba8..2c3527c 100644
--- a/man/tikuv.Rd
+++ b/man/tikuv.Rd
@@ -7,7 +7,7 @@
}
\usage{
-tikuv(d, lmean = "identity", lsigma = "loge", emean = list(), esigma = list(),
+tikuv(d, lmean = "identity", lsigma = "loge",
isigma = NULL, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -25,11 +25,16 @@ tikuv(d, lmean = "identity", lsigma = "loge", emean = list(), esigma = list(),
See \code{\link{Links}} for more choices.
}
- \item{emean, esigma}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+
+% \item{emean, esigma}{
+% List. Extra argument for each of the links.
+% See \code{earg} in \code{\link{Links}} for general information.
+%emean = list(), esigma = list(),
+%
+% }
+
+
\item{isigma}{
Optional initial value for \eqn{\sigma}{sigma}.
A \code{NULL} means a value is computed internally.
@@ -116,11 +121,11 @@ tikuv(d, lmean = "identity", lsigma = "loge", emean = list(), esigma = list(),
\examples{
m = 1.0; sigma = exp(0.5)
-tdata = data.frame(y = rtikuv(n = 1000, d = 1, m = m, s = sigma))
-tdata = transform(tdata, sy = sort(y))
-fit = vglm(y ~ 1, fam = tikuv(d = 1), tdata, trace = TRUE)
+tdata <- data.frame(y = rtikuv(n = 1000, d = 1, m = m, s = sigma))
+tdata <- transform(tdata, sy = sort(y))
+fit <- vglm(y ~ 1, fam = tikuv(d = 1), tdata, trace = TRUE)
coef(fit, matrix = TRUE)
-(Cfit = Coef(fit))
+(Cfit <- Coef(fit))
with(tdata, mean(y))
\dontrun{ with(tdata, hist(y, prob = TRUE))
lines(dtikuv(sy, d = 1, m = Cfit[1], s = Cfit[2]) ~ sy, tdata, col = "orange") }
diff --git a/man/tikuvUC.Rd b/man/tikuvUC.Rd
index 99c13bf..84ff221 100644
--- a/man/tikuvUC.Rd
+++ b/man/tikuvUC.Rd
@@ -70,7 +70,7 @@ rtikuv(n, d, mean = 0, sigma = 1, Smallno = 1.0e-6)
}
\examples{
\dontrun{ par(mfrow = c(2, 1))
-x = seq(-5, 5, len = 401)
+x <- seq(-5, 5, len = 401)
plot(x, dnorm(x), type = "l", col = "black", ylab = "", las = 1,
main = "Black is standard normal, others are dtikuv(x, d)")
lines(x, dtikuv(x, d = -10), col = "orange")
@@ -85,11 +85,10 @@ lines(x, ptikuv(x, d = -10), col = "orange")
lines(x, ptikuv(x, d = -1 ), col = "blue")
lines(x, ptikuv(x, d = 1 ), col = "green")
legend("topleft", col = c("orange","blue","green"), lty = rep(1, len = 3),
- legend = paste("d =", c(-10, -1, 1)))
+ legend = paste("d =", c(-10, -1, 1))) }
-probs = seq(0.1, 0.9, by = 0.1)
-ptikuv(qtikuv(p = probs, d = 1), d = 1) - probs # Should be all 0
-}
+probs <- seq(0.1, 0.9, by = 0.1)
+ptikuv(qtikuv(p = probs, d = 1), d = 1) - probs # Should be all 0
}
\keyword{distribution}
diff --git a/man/tobit.Rd b/man/tobit.Rd
index 08d7460..132952e 100644
--- a/man/tobit.Rd
+++ b/man/tobit.Rd
@@ -8,39 +8,40 @@
}
\usage{
tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
- emu = list(), esd = list(), nsimEIM = 250,
- imu = NULL, isd = NULL,
+ nsimEIM = 250, imu = NULL, isd = NULL,
type.fitted = c("uncensored", "censored", "mean.obs"),
imethod = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{Lower}{
- Numeric of length 1, it is the value \eqn{L} described below.
+ Numeric. It is the value \eqn{L} described below.
Any value of the linear model
\eqn{x_i^T \beta}{x_i^T beta} that
is less than this lowerbound is assigned this value.
Hence this should be the smallest possible value in the response
variable.
+ May be a vector (see below for more information).
}
\item{Upper}{
- Numeric of length 1, it is the value \eqn{U} described below.
+ Numeric. It is the value \eqn{U} described below.
Any value of the linear model
\eqn{x_i^T \beta}{x_i^T beta} that
is greater than this upperbound is assigned this value.
Hence this should be the largest possible value in the response
variable.
+ May be a vector (see below for more information).
}
- \item{lmu, lsd, emu, esd}{
- Parameter link functions and extra arguments for the mean and
- standard deviation parameters.
+ \item{lmu, lsd}{
+ Parameter link functions for the mean and standard deviation parameters.
See \code{\link{Links}} for more choices.
The standard deviation is a positive quantity, therefore a log link
is its default.
}
+
\item{imu, isd}{
See \code{\link{CommonVGAMffArguments}} for information.
@@ -135,8 +136,15 @@ tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
\author{ Thomas W. Yee }
\note{
- The response can be a matrix. Then \code{Lower} and \code{Upper}
- are recycled to the number of columns.
+ The response can be a matrix.
+ If so, then \code{Lower} and \code{Upper}
+ are recycled into a matrix with the number of columns equal
+ to the number of responses,
+ and the recycling is done row-wise (\code{byrow = TRUE}).
+ For example, these are returned in \code{fit4 at misc$Lower} and
+ \code{fit4 at misc$Upper} below.
+
+
If there is no censoring then
\code{\link{normal1}} is recommended instead. Any value of the
response less than \code{Lower} or greater than \code{Upper} will
@@ -162,31 +170,59 @@ tobit(Lower = 0, Upper = Inf, lmu = "identity", lsd = "loge",
}
\examples{
# Here, fit1 is a standard Tobit model and fit2 is a nonstandard Tobit model
-Lower = 1; Upper = 4; set.seed(1) # For the nonstandard Tobit model
-tdata = data.frame(x2 = seq(-1, 1, len = (nn <- 100)))
-meanfun1 = function(x) 0 + 2*x
-meanfun2 = function(x) 2 + 2*x
-tdata = transform(tdata,
- y1 = rtobit(nn, mean = meanfun1(x2)), # Standard Tobit model
- y2 = rtobit(nn, mean = meanfun2(x2), Lower = Lower, Upper = Upper))
+tdata <- data.frame(x2 = seq(-1, 1, length = (nn <- 100)))
+set.seed(1)
+Lower <- 1; Upper = 4 # For the nonstandard Tobit model
+tdata <- transform(tdata,
+ Lower.vec = rnorm(nn, Lower, 0.5),
+ Upper.vec = rnorm(nn, Upper, 0.5))
+meanfun1 <- function(x) 0 + 2*x
+meanfun2 <- function(x) 2 + 2*x
+meanfun3 <- function(x) 2 + 2*x
+meanfun4 <- function(x) 3 + 2*x
+tdata <- transform(tdata,
+ y1 = rtobit(nn, mean = meanfun1(x2)), # Standard Tobit model
+ y2 = rtobit(nn, mean = meanfun2(x2), Lower = Lower, Upper = Upper),
+ y3 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec),
+ y4 = rtobit(nn, mean = meanfun3(x2), Lower = Lower.vec, Upper = Upper.vec))
with(tdata, table(y1 == 0)) # How many censored values?
with(tdata, table(y2 == Lower | y2 == Upper)) # How many censored values?
with(tdata, table(attr(y2, "cenL")))
with(tdata, table(attr(y2, "cenU")))
-fit1 = vglm(y1 ~ x2, tobit, tdata, trace = TRUE,
- crit = "coeff") # crit = "coeff" is recommended
+fit1 <- vglm(y1 ~ x2, tobit, tdata, trace = TRUE,
+ crit = "coeff") # crit = "coeff" is recommended
coef(fit1, matrix = TRUE)
summary(fit1)
-fit2 = vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"),
+fit2 <- vglm(y2 ~ x2, tobit(Lower = Lower, Upper = Upper, type.f = "cens"),
tdata, crit = "coeff", trace = TRUE) # ditto
table(fit2 at extra$censoredL)
table(fit2 at extra$censoredU)
coef(fit2, matrix = TRUE)
+fit3 <- vglm(y3 ~ x2,
+ tobit(Lower = with(tdata, Lower.vec),
+ Upper = with(tdata, Upper.vec), type.f = "cens"),
+ tdata, crit = "coeff", trace = TRUE) # ditto
+table(fit3 at extra$censoredL)
+table(fit3 at extra$censoredU)
+coef(fit3, matrix = TRUE)
+
+# fit4 is fit3 but with type.fitted = "uncen".
+fit4 <- vglm(cbind(y3, y4) ~ x2,
+ tobit(Lower = rep(with(tdata, Lower.vec), each = 2),
+ Upper = rep(with(tdata, Upper.vec), each = 2)),
+ tdata, crit = "coeff", trace = TRUE) # ditto
+head(fit4 at extra$censoredL)  # A matrix
+head(fit4 at extra$censoredU)  # A matrix
+head(fit4 at misc$Lower)  # A matrix
+head(fit4 at misc$Upper)  # A matrix
+coef(fit4, matrix = TRUE)
+
\dontrun{ # Plot the results
-par(mfrow = c(2, 1))
+par(mfrow = c(2, 2))
+# Plot fit1
plot(y1 ~ x2, tdata, las = 1, main = "Standard Tobit model",
col = as.numeric(attr(y1, "cenL")) + 3,
pch = as.numeric(attr(y1, "cenL")) + 1)
@@ -199,6 +235,7 @@ lines(fitted(fit1) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
lines(fitted(lm(y1 ~ x2, tdata)) ~ x2, tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
+# Plot fit2
plot(y2 ~ x2, tdata, las = 1, main = "Tobit model",
col = as.numeric(attr(y2, "cenL")) + 3 +
as.numeric(attr(y2, "cenU")),
@@ -212,6 +249,38 @@ lines(meanfun2(x2) ~ x2, tdata, col = "purple", lwd = 2)
lines(fitted(fit2) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
lines(fitted(lm(y2 ~ x2, tdata)) ~ x2, tdata, col = "black",
lty = 2, lwd = 2) # This is simplest but wrong!
+
+# Plot fit3
+plot(y3 ~ x2, tdata, las = 1,
+ main = "Tobit model with nonconstant censor levels",
+ col = as.numeric(attr(y3, "cenL")) + 3 +
+ as.numeric(attr(y3, "cenU")),
+ pch = as.numeric(attr(y3, "cenL")) + 1 +
+ as.numeric(attr(y3, "cenU")))
+legend(x = "topleft", leg = c("censored", "uncensored"),
+ pch = c(2, 1), col = c("blue", "green"))
+legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
+ col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2))
+lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2)
+lines(fitted(fit3) ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
+lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
+ lty = 2, lwd = 2) # This is simplest but wrong!
+
+# Plot fit4
+plot(y3 ~ x2, tdata, las = 1,
+ main = "Tobit model with nonconstant censor levels",
+ col = as.numeric(attr(y3, "cenL")) + 3 +
+ as.numeric(attr(y3, "cenU")),
+ pch = as.numeric(attr(y3, "cenL")) + 1 +
+ as.numeric(attr(y3, "cenU")))
+legend(x = "topleft", leg = c("censored", "uncensored"),
+ pch = c(2, 1), col = c("blue", "green"))
+legend(-1.0, 3.5, c("Truth", "Estimate", "Naive"),
+ col = c("purple", "orange", "black"), lwd = 2, lty = c(1, 2, 2))
+lines(meanfun3(x2) ~ x2, tdata, col = "purple", lwd = 2)
+lines(fitted(fit4)[, 1] ~ x2, tdata, col = "orange", lwd = 2, lty = 2)
+lines(fitted(lm(y3 ~ x2, tdata)) ~ x2, tdata, col = "black",
+ lty = 2, lwd = 2) # This is simplest but wrong!
}
}
\keyword{models}
diff --git a/man/tobitUC.Rd b/man/tobitUC.Rd
index 710e474..30ac150 100644
--- a/man/tobitUC.Rd
+++ b/man/tobitUC.Rd
@@ -60,8 +60,8 @@ rtobit(n, mean = 0, sd = 1, Lower = 0, Upper = Inf)
}
\examples{
-\dontrun{ m = 0.5; x = seq(-2, 4, len = 501)
-Lower = -1; Upper = 2.5
+\dontrun{ m <- 0.5; x <- seq(-2, 4, len = 501)
+Lower <- -1; Upper <- 2.5
plot(x, ptobit(x, m = m, Lower = Lower, Upper = Upper),
type = "l", ylim = 0:1, las = 1, col = "orange",
ylab = paste("ptobit(m = ", m, ", sd = 1, Lower =", Lower,
@@ -71,8 +71,8 @@ plot(x, ptobit(x, m = m, Lower = Lower, Upper = Upper),
abline(h = 0)
lines(x, dtobit(x, m = m, Lower = Lower, Upper = Upper), col = "blue")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qtobit(probs, m = m, Lower = Lower, Upper = Upper)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qtobit(probs, m = m, Lower = Lower, Upper = Upper)
lines(Q, ptobit(Q, m = m, Lower = Lower, Upper = Upper),
col = "purple", lty = "dashed", type = "h")
lines(Q, dtobit(Q, m = m, Lower = Lower, Upper = Upper),
@@ -80,7 +80,7 @@ lines(Q, dtobit(Q, m = m, Lower = Lower, Upper = Upper),
abline(h = probs, col = "purple", lty = "dashed")
max(abs(ptobit(Q, m = m, Lower = Lower, Upper = Upper) - probs)) # Should be 0
-endpts = c(Lower, Upper) # Endpoints have a spike
+endpts <- c(Lower, Upper) # Endpoints have a spike
lines(endpts, dtobit(endpts, m = m, Lower = Lower, Upper = Upper),
col = "blue", lwd = 2, type = "h")
}
diff --git a/man/toxop.Rd b/man/toxop.Rd
index df0ad5c..70c4317 100644
--- a/man/toxop.Rd
+++ b/man/toxop.Rd
@@ -23,10 +23,14 @@
\details{
See the references for details.
+
+
}
\source{
See the references for details.
+
+
}
\seealso{
diff --git a/man/tparetoUC.Rd b/man/tparetoUC.Rd
index 8a26c46..c4c638d 100644
--- a/man/tparetoUC.Rd
+++ b/man/tparetoUC.Rd
@@ -60,20 +60,20 @@ rtpareto(n, lower, upper, shape)
}
-\examples{ lower = 3; upper = 8; kay = exp(0.5)
-\dontrun{ xx = seq(lower - 0.5, upper + 0.5, len = 401)
+\examples{ lower <- 3; upper <- 8; kay <- exp(0.5)
+\dontrun{ xx <- seq(lower - 0.5, upper + 0.5, len = 401)
plot(xx, dtpareto(xx, low = lower, upp = upper, shape = kay),
main = "Truncated Pareto density split into 10 equal areas",
type = "l", ylim = 0:1, xlab = "x")
abline(h = 0, col = "blue", lty = 2)
-qq = qtpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
- shape = kay)
+qq <- qtpareto(seq(0.1, 0.9, by = 0.1), low = lower, upp = upper,
+ shape = kay)
lines(qq, dtpareto(qq, low = lower, upp = upper, shape = kay),
col = "purple", lty = 3, type = "h")
lines(xx, ptpareto(xx, low = lower, upp = upper, shape = kay),
col = "orange") }
-pp = seq(0.1, 0.9, by = 0.1)
-qq = qtpareto(pp, low = lower, upp = upper, shape = kay)
+pp <- seq(0.1, 0.9, by = 0.1)
+qq <- qtpareto(pp, low = lower, upp = upper, shape = kay)
ptpareto(qq, low = lower, upp = upper, shape = kay)
qtpareto(ptpareto(qq, low = lower, upp = upper, shape = kay),
diff --git a/man/triangle.Rd b/man/triangle.Rd
index 2ed6ca0..6e9e90e 100644
--- a/man/triangle.Rd
+++ b/man/triangle.Rd
@@ -8,9 +8,8 @@
}
\usage{
-triangle(lower = 0, upper = 1, link = "elogit",
- earg = if(link == "elogit") list(min = lower, max = upper) else
- list(), itheta = NULL)
+triangle(lower = 0, upper = 1,
+ link = elogit(min = lower, max = upper), itheta = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -26,11 +25,6 @@ triangle(lower = 0, upper = 1, link = "elogit",
The default constrains the estimate to lie in the interval.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{itheta}{
Optional initial value for the parameter.
The default is to compute the value internally.
@@ -44,33 +38,40 @@ triangle(lower = 0, upper = 1, link = "elogit",
\eqn{y = 0} axis at \eqn{A} and \eqn{B}.
Here, Fisher scoring is used.
+
On fitting, the \code{extra} slot has components called \code{lower}
and \code{upper} which contains the values of the above arguments
(recycled to the right length).
The fitted values are the mean of the distribution, which is
a little messy to write.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
+
}
%\references{
%}
\author{ T. W. Yee }
\note{
- The response must contain values in \eqn{(A,B)}.
+ The response must contain values in \eqn{(A, B)}.
For most data sets (especially small ones) it is very common for
half-stepping to occur.
+
}
\seealso{
\code{\link{Triangle}}.
+
+
}
\examples{
-tdata = data.frame(y = rtriangle(n <- 3000, theta = 3/4))
-fit = vglm(y ~ 1, triangle(link = "identity"), tdata, trace = TRUE)
+tdata <- data.frame(y = rtriangle(n <- 3000, theta = 3/4))
+fit <- vglm(y ~ 1, triangle(link = "identity"), tdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
head(fit at extra$lower)
diff --git a/man/triangleUC.Rd b/man/triangleUC.Rd
index 9694889..ba227ac 100644
--- a/man/triangleUC.Rd
+++ b/man/triangleUC.Rd
@@ -60,17 +60,17 @@ rtriangle(n, theta, lower = 0, upper = 1)
}
\examples{
-\dontrun{ x = seq(-0.1, 1.1, by = 0.01); theta = 0.75
+\dontrun{ x <- seq(-0.1, 1.1, by = 0.01); theta <- 0.75
plot(x, dtriangle(x, theta = theta), type = "l", col = "blue", las = 1,
main = "Blue is density, orange is cumulative distribution function",
sub = "Purple lines are the 10,20,...,90 percentiles",
ylim = c(0,2), ylab = "")
abline(h = 0, col = "blue", lty = 2)
lines(x, ptriangle(x, theta = theta), col = "orange")
-probs = seq(0.1, 0.9, by = 0.1)
-Q = qtriangle(probs, theta = theta)
+probs <- seq(0.1, 0.9, by = 0.1)
+Q <- qtriangle(probs, theta = theta)
lines(Q, dtriangle(Q, theta = theta), col = "purple", lty = 3, type = "h")
-ptriangle(Q, theta = theta) - probs # Should be all zero
+ptriangle(Q, theta = theta) - probs # Should be all zero
abline(h = probs, col = "purple", lty = 3) }
}
\keyword{distribution}
diff --git a/man/trplot.Rd b/man/trplot.Rd
index 1bc6122..6eab4cb 100644
--- a/man/trplot.Rd
+++ b/man/trplot.Rd
@@ -16,6 +16,8 @@ trplot(object, ...)
methods function of the model. They usually are graphical
parameters, and sometimes they are fed
into the methods function for \code{\link{Coef}}.
+
+
}
}
\details{
@@ -40,7 +42,7 @@ trplot(object, ...)
\references{
-Yee, T. W. (2011)
+Yee, T. W. (2012)
On constrained and unconstrained
quadratic ordination.
\emph{Manuscript in preparation}.
@@ -63,22 +65,21 @@ quadratic ordination.
\examples{
\dontrun{ set.seed(123)
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-
-p1cqo = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+p1cqo <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~
WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = quasipoissonff, data = hspider, Crow1positive = FALSE)
+ quasipoissonff, data = hspider, Crow1positive = FALSE)
-nos = ncol(p1cqo at y)
-clr = 1:nos # OR (1:(nos+1))[-7] to omit yellow
+nos <- ncol(depvar(p1cqo))
+clr <- 1:nos # OR (1:(nos+1))[-7] to omit yellow
trplot(p1cqo, whichSpecies = 1:3, log = "xy",
- col = c("blue","orange","green"), lwd = 2, label = TRUE) -> ii
-legend(0.00005, 0.3, paste(ii$species[,1], ii$species[,2], sep = " and "),
- lwd = 2, lty = 1, col = c("blue","orange","green"))
-abline(a = 0, b = 1, lty = "dashed") }
+ col = c("blue", "orange", "green"), lwd = 2, label = TRUE) -> ii
+legend(0.00005, 0.3, paste(ii$species[, 1], ii$species[, 2], sep = " and "),
+ lwd = 2, lty = 1, col = c("blue", "orange", "green"))
+abline(a = 0, b = 1, lty = "dashed", col = "grey") }
}
\keyword{models}
\keyword{regression}
diff --git a/man/trplot.qrrvglm.Rd b/man/trplot.qrrvglm.Rd
index 0089d8e..875ddba 100644
--- a/man/trplot.qrrvglm.Rd
+++ b/man/trplot.qrrvglm.Rd
@@ -29,6 +29,8 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
response matrix. If character, these must match exactly with the
species' names.
The default is to use all species.
+
+
}
\item{add}{ Logical. Add to an existing plot? If \code{FALSE} (default),
a new plot is made. }
@@ -38,54 +40,80 @@ trplot.qrrvglm(object, whichSpecies = NULL, add=FALSE, plot.it = TRUE,
\item{sitenames}{ Character vector. The names of the sites. }
\item{axes.equal}{ Logical. If \code{TRUE}, the x- and y-axes
will be on the same scale.
+
+
}
\item{cex}{ Character expansion of the labelling of the site names.
Used only if \code{label.sites} is \code{TRUE}.
See the \code{cex} argument in \code{\link[graphics]{par}}.
+
+
}
\item{col}{Color of the lines.
See the \code{col} argument in \code{\link[graphics]{par}}.
Here, \code{nos} is the number of species.
+
+
}
\item{log}{ Character, specifying which (if any) of the x- and
y-axes are to be on a logarithmic scale.
See the \code{log} argument in \code{\link[graphics]{par}}.
+
+
}
\item{lty}{ Line type.
See the \code{lty} argument of \code{\link[graphics]{par}}.
+
+
}
\item{lwd}{ Line width.
See the \code{lwd} argument of \code{\link[graphics]{par}}.
+
+
}
\item{tcol}{Color of the text for the site names.
See the \code{col} argument in \code{\link[graphics]{par}}.
Used only if \code{label.sites} is \code{TRUE}.
+
+
}
\item{xlab}{Character caption for the x-axis.
By default, a suitable caption is found.
See the \code{xlab} argument in \code{\link[graphics]{plot}}
or \code{\link[graphics]{title}}.
+
+
}
\item{ylab}{Character caption for the y-axis.
By default, a suitable caption is found.
See the \code{xlab} argument in \code{\link[graphics]{plot}}
or \code{\link[graphics]{title}}.
+
+
}
\item{main}{ Character, giving the title of the plot.
See the \code{main} argument in \code{\link[graphics]{plot}}
or \code{\link[graphics]{title}}.
+
+
}
\item{type}{ Character, giving the type of plot. A common
option is to use \code{type="l"} for lines only.
See the \code{type} argument of \code{\link[graphics]{plot}}.
+
+
}
\item{check.ok}{ Logical. Whether a check is performed to see
that \code{Norrr = ~ 1} was used.
It doesn't make sense to have a trace plot unless this is so.
+
+
}
\item{\dots}{ Arguments passed into the \code{plot} function
when setting up the entire plot. Useful arguments here include
\code{xlim} and \code{ylim}.
+
+
}
}
\details{
@@ -118,6 +146,8 @@ variables.
}
\item{sitenames}{A character vector of site names, sorted by
the latent variable (from low to high).
+
+
}
}
\references{
@@ -129,7 +159,6 @@ quadratic ordination.
\emph{Manuscript in preparation}.
-
}
\author{ Thomas W. Yee }
@@ -153,19 +182,19 @@ quadratic ordination.
}
-\examples{\dontrun{ set.seed(111) # This leads to the global solution
-# hspider[,1:6] = scale(hspider[,1:6]) # Standardize the environmental variables
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
- Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- fam = poissonff, data = hspider, trace = FALSE)
+\examples{\dontrun{ set.seed(111) # This leads to the global solution
+# hspider[,1:6] <- scale(hspider[,1:6]) # Standardize the environmental variables
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi, Auloalbi,
+ Pardlugu, Pardmont, Pardnigr, Pardpull, Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ poissonff, data = hspider, trace = FALSE)
trplot(p1, whichSpecies = 1:3, log = "xy", type = "b", lty = 1,
main = "Trajectory plot of three hunting spiders species",
col = c("blue","red","green"), lwd = 2, label = TRUE) -> ii
-legend(0.00005, 0.3, lwd = 2, lty = 1, col = c("blue","red","green"),
+legend(0.00005, 0.3, lwd = 2, lty = 1, col = c("blue", "red", "green"),
with(ii, paste(species.names[,1], species.names[,2], sep = " and ")))
-abline(a = 0, b = 1, lty = "dashed") # Useful reference line
+abline(a = 0, b = 1, lty = "dashed", col = "grey") # Useful reference line
}
}
\keyword{models}
diff --git a/man/undocumented-methods.Rd b/man/undocumented-methods.Rd
index b5a656a..22c3eaa 100644
--- a/man/undocumented-methods.Rd
+++ b/man/undocumented-methods.Rd
@@ -5,6 +5,17 @@
%
%
%
+% 20120821
+\alias{model.matrix,vsmooth.spline-method}
+%
+% 20120511
+\alias{is.parallel,matrix-method}
+\alias{is.parallel,vglm-method}
+\alias{is.parallel,ANY-method}
+\alias{is.zero,matrix-method}
+\alias{is.zero,vglm-method}
+\alias{is.zero,ANY-method}
+%
%
% 20120215
%\alias{print,vglmff-method}
@@ -20,8 +31,8 @@
\alias{vcov,ANY-method}
\alias{plot,cao,ANY-method}
\alias{plot,qrrvglm,ANY-method}
-\alias{plot,rcam,ANY-method}
-\alias{plot,rcam0,ANY-method}
+\alias{plot,rcim,ANY-method}
+\alias{plot,rcim0,ANY-method}
\alias{plot,uqo,ANY-method}
\alias{plot,vgam,ANY-method}
\alias{plot,vglm,ANY-method}
@@ -72,7 +83,7 @@
\alias{depvar,ANY-method}
\alias{depvar,cao-method}
\alias{depvar,qrrvglm-method}
-\alias{depvar,rcam-method}
+\alias{depvar,rcim-method}
\alias{depvar,rrvglm-method}
\alias{depvar,vlm-method}
%
@@ -128,7 +139,7 @@
\alias{hatvalues,vglm-method}
\alias{hatvalues,cao-method}
\alias{hatvalues,qrrvglm-method}
-\alias{hatvalues,rcam-method}
+\alias{hatvalues,rcim-method}
\alias{hatvalues,rrvglm-method}
%
%
@@ -138,7 +149,7 @@
\alias{hatplot,vglm-method}
\alias{hatplot,cao-method}
\alias{hatplot,qrrvglm-method}
-\alias{hatplot,rcam-method}
+\alias{hatplot,rcim-method}
\alias{hatplot,rrvglm-method}
%
%
@@ -148,7 +159,7 @@
\alias{dfbeta,vglm-method}
\alias{dfbeta,cao-method}
\alias{dfbeta,qrrvglm-method}
-\alias{dfbeta,rcam-method}
+\alias{dfbeta,rcim-method}
\alias{dfbeta,rrvglm-method}
%
%
@@ -157,8 +168,8 @@
\alias{guplot,vlm-method}
%\alias{model.frame,ANY-method}
\alias{model.frame,vlm-method}
-%\alias{plot,rcam0,ANY-method}
-%\alias{plot,rcam,ANY-method}
+%\alias{plot,rcim0,ANY-method}
+%\alias{plot,rcim,ANY-method}
%\alias{plot,cao,ANY-method}
%\alias{plot,vlm,ANY-method}
%\alias{plot,vglm,ANY-method}
@@ -204,7 +215,7 @@
\alias{npred,vlm-method}
\alias{npred,cao-method}
\alias{npred,qrrvglm-method}
-\alias{npred,rcam-method}
+\alias{npred,rcim-method}
\alias{npred,rrvglm-method}
\alias{nvar,ANY-method}
\alias{nvar,vlm-method}
@@ -213,7 +224,7 @@
\alias{nvar,qrrvglm-method}
\alias{nvar,cao-method}
\alias{nvar,vlm-method}
-\alias{nvar,rcam-method}
+\alias{nvar,rcim-method}
\alias{Opt,qrrvglm-method}
\alias{Opt,Coef.qrrvglm-method}
\alias{Opt,uqo-method}
@@ -301,8 +312,8 @@
\alias{summary,grc-method}
\alias{summary,cao-method}
\alias{summary,qrrvglm-method}
-\alias{summary,rcam-method}
-\alias{summary,rcam0-method}
+\alias{summary,rcim-method}
+\alias{summary,rcim0-method}
\alias{summary,rrvglm-method}
\alias{summary,vgam-method}
\alias{summary,vglm-method}
diff --git a/man/uqo.Rd b/man/uqo.Rd
index 44d4bd9..d017509 100644
--- a/man/uqo.Rd
+++ b/man/uqo.Rd
@@ -229,19 +229,18 @@ this is not done.
}
-\examples{ \dontrun{
-set.seed(123) # This leads to the global solution
-hspider[,1:6] = scale(hspider[,1:6]) # Standardized environmental vars
-p1 = cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
- Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
- Trocterr, Zoraspin) ~
- WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
- ITolerances = TRUE, fam = poissonff, data = hspider,
- Crow1positive = TRUE, Bestof=3, trace = FALSE)
+\examples{ \dontrun{ set.seed(123) # This leads to the global solution
+hspider[,1:6] <- scale(hspider[,1:6]) # Standardized environmental vars
+p1 <- cqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+ Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
+ Trocterr, Zoraspin) ~
+ WaterCon + BareSand + FallTwig + CoveMoss + CoveHerb + ReflLux,
+ ITolerances = TRUE, fam = poissonff, data = hspider,
+ Crow1positive = TRUE, Bestof=3, trace = FALSE)
if (deviance(p1) > 1589.0) stop("suboptimal fit obtained")
set.seed(111)
-up1 = uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
+up1 <- uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Auloalbi, Pardlugu, Pardmont, Pardnigr, Pardpull,
Trocterr, Zoraspin) ~ 1,
family = poissonff, data = hspider,
@@ -249,25 +248,25 @@ up1 = uqo(cbind(Alopacce, Alopcune, Alopfabr, Arctlute, Arctperi,
Crow1positive = TRUE, lvstart = -lv(p1))
if (deviance(up1) > 1310.0) stop("suboptimal fit obtained")
-nos = ncol(up1 at y) # Number of species
-clr = (1:(nos+1))[-7] # to omit yellow
+nos <- ncol(up1 at y) # Number of species
+clr <- (1:(nos+1))[-7] # Omit yellow
lvplot(up1, las = 1, y = TRUE, pch = 1:nos, scol = clr, lcol = clr,
pcol = clr, llty = 1:nos, llwd=2)
legend(x=2, y = 135, colnames(up1 at y), col = clr, lty = 1:nos,
lwd=2, merge = FALSE, ncol = 1, x.inter=4.0, bty = "l", cex = 0.9)
# Compare the site scores between the two models
-plot(lv(p1), lv(up1), xlim = c(-3,4), ylim = c(-3,4), las = 1)
-abline(a = 0, b=-1, lty=2, col = "blue", xpd = FALSE)
+plot(lv(p1), lv(up1), xlim = c(-3, 4), ylim = c(-3, 4), las = 1)
+abline(a = 0, b = -1, lty = 2, col = "blue", xpd = FALSE)
cor(lv(p1, ITol = TRUE), lv(up1))
# Another comparison between the constrained and unconstrained models
# The signs are not right so they are similar when reflected about 0
-par(mfrow = c(2,1))
+par(mfrow = c(2, 1))
persp(up1, main = "Red/Blue are the constrained/unconstrained models",
label = TRUE, col = "blue", las = 1)
persp(p1, add = FALSE, col = "red")
-pchisq(deviance(p1) - deviance(up1), df=52-30, lower.tail = FALSE)
+pchisq(deviance(p1) - deviance(up1), df = 52-30, lower.tail = FALSE)
}}
\keyword{models}
\keyword{regression}
diff --git a/man/venice.Rd b/man/venice.Rd
index 2e2ed0f..d4272a6 100644
--- a/man/venice.Rd
+++ b/man/venice.Rd
@@ -122,9 +122,9 @@ Istituzione Centro Previsione e Segnalazioni Maree.
\dontrun{ matplot(venice[["year"]], venice[, -1], xlab = "Year",
ylab = "Sea level (cm)", type = "l") }
-ymat = as.matrix(venice[, paste("r", 1:10, sep = "")])
-fit1 = vgam(ymat ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
- data = venice, trace = TRUE, na.action = na.pass)
+ymat <- as.matrix(venice[, paste("r", 1:10, sep = "")])
+fit1 <- vgam(ymat ~ s(year, df = 3), gumbel(R = 365, mpv = TRUE),
+ data = venice, trace = TRUE, na.action = na.pass)
head(fitted(fit1))
\dontrun{ par(mfrow = c(2, 1), xpd = TRUE)
diff --git a/man/vgam-class.Rd b/man/vgam-class.Rd
index 362dca7..ce769b1 100644
--- a/man/vgam-class.Rd
+++ b/man/vgam-class.Rd
@@ -229,6 +229,7 @@ Vector generalized additive models.
have (\code{\link{vglm-class}}), plus the first few slots
described in the section above.
+
}
%~Make other sections like WARNING with \section{WARNING }{....} ~
@@ -240,11 +241,12 @@ Vector generalized additive models.
\code{\link{vglm-class}},
\code{\link{vglmff-class}}.
+
}
\examples{
# Fit a nonparametric proportional odds model
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
cumulative(parallel = TRUE), pneumo)
}
diff --git a/man/vgam.Rd b/man/vgam.Rd
index 865d518..837b406 100644
--- a/man/vgam.Rd
+++ b/man/vgam.Rd
@@ -23,18 +23,15 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
\item{formula}{
a symbolic description of the model to be fit.
- The RHS of the formula is applied to each linear/additive predictor.
- Different
- variables in each linear/additive predictor can be chosen by specifying
- constraint matrices.
+ The RHS of the formula is applied to each linear/additive predictor,
+ and usually includes at least one \code{\link[VGAM]{s}} term.
+ Different variables in each linear/additive predictor
+ can be chosen by specifying constraint matrices.
+
}
\item{family}{
- a function of class \code{"vglmff"} (see \code{\link{vglmff-class}})
- describing what statistical model is to be fitted. This is called a
- ``\pkg{VGAM} family function''. See \code{\link{CommonVGAMffArguments}}
- for general information about many types of arguments found in this
- type of function.
+ Same as for \code{\link{vglm}}.
}
\item{data}{
@@ -44,45 +41,20 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
\code{vgam} is called.
}
- \item{weights}{
- an optional vector or matrix of (prior) weights
- to be used in the fitting process.
- If \code{weights} is a matrix, then it must be in
- \emph{matrix-band} form, whereby the first \eqn{M}
- columns of the matrix are the
- diagonals, followed by the upper-diagonal band, followed by the
- band above that, etc. In this case, there can be up to \eqn{M(M+1)}
- columns, with the last column corresponding to the (1,\eqn{M}) elements
- of the weight matrices.
+ \item{weights, subset, na.action}{
+ Same as for \code{\link{vglm}}.
- }
- \item{subset}{
- an optional logical vector specifying a subset of
- observations to
- be used in the fitting process.
-
- }
- \item{na.action}{
- a function which indicates what should happen when
- the data contain \code{NA}s.
- The default is set by the \code{na.action} setting
- of \code{\link[base]{options}}, and is \code{na.fail} if that is unset.
- The ``factory-fresh'' default is \code{na.omit}.
}
\item{etastart, mustart, coefstart}{
Same as for \code{\link{vglm}}.
+
}
\item{control}{
a list of parameters for controlling the fitting process.
See \code{\link{vgam.control}} for details.
- }
- \item{offset}{
- a vector or \eqn{M}-column matrix of offset values.
- These are \emph{a priori} known and are added to the linear/additive
- predictors during fitting.
}
\item{method}{
@@ -91,9 +63,9 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
uses iteratively reweighted least squares (IRLS).
}
- \item{model}{
- a logical value indicating whether the \emph{model frame} should be
- assigned in the \code{model} slot.
+ \item{constraints, model, offset}{
+ Same as for \code{\link{vglm}}.
+
}
\item{x.arg, y.arg}{
@@ -103,37 +75,11 @@ vgam(formula, family, data = list(), weights = NULL, subset = NULL,
matrix; to get the VGAM model matrix type \code{model.matrix(vgamfit)}
where \code{vgamfit} is a \code{vgam} object.
- }
- \item{contrasts}{
- an optional list. See the \code{contrasts.arg} of
- \code{\link{model.matrix.default}}.
-
- }
- \item{constraints}{
- an optional list of constraint matrices. The components
- of the list must be named with the term it corresponds
- to (and it must match in character format exactly).
- Each constraint matrix must have \eqn{M} rows, and be
- of full-column rank. By default, constraint matrices are
- the \eqn{M} by \eqn{M} identity matrix unless arguments
- in the family function itself override these values.
- If \code{constraints} is used it must contain \emph{all}
- the terms; an incomplete list is not accepted.
-
- }
- \item{extra}{
- an optional list with any extra information that might be needed by
- the \pkg{VGAM} family function.
}
- \item{qr.arg}{
- logical value indicating whether the slot \code{qr}, which returns
- the QR decomposition of the VLM model matrix, is returned on the object.
+ \item{contrasts, extra, qr.arg, smart}{
+ Same as for \code{\link{vglm}}.
- }
- \item{smart}{
- logical value indicating whether smart prediction
- (\code{\link{smartpred}}) will be used.
}
\item{\dots}{
@@ -272,14 +218,14 @@ The \code{VGAM} Package.
}
\examples{ # Nonparametric proportional odds model
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let),
cumulative(parallel = TRUE), pneumo)
# Nonparametric logistic regression
-fit = vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, hunua)
\dontrun{ plot(fit, se = TRUE) }
-pfit = predict(fit, type = "terms", raw = TRUE, se = TRUE)
+pfit <- predict(fit, type = "terms", raw = TRUE, se = TRUE)
names(pfit)
head(pfit$fitted)
head(pfit$se.fit)
@@ -287,12 +233,12 @@ pfit$df
pfit$sigma
# Fit two species simultaneously
-fit2 = vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
- binomialff(mv = TRUE), hunua)
+fit2 <- vgam(cbind(agaaus, kniexc) ~ s(altitude, df = c(2, 3)),
+ binomialff(mv = TRUE), hunua)
coef(fit2, matrix = TRUE) # Not really interpretable
\dontrun{ plot(fit2, se = TRUE, overlay = TRUE, lcol = 1:2, scol = 1:2)
-ooo = with(hunua, order(altitude))
+ooo <- with(hunua, order(altitude))
with(hunua, matplot(altitude[ooo], fitted(fit2)[ooo,], ylim = c(0, .8),
xlab = "Altitude (m)", ylab = "Probability of presence", las = 1,
main = "Two plant species' response curves", type = "l", lwd = 2))
diff --git a/man/vgam.control.Rd b/man/vgam.control.Rd
index 3833880..52716c5 100644
--- a/man/vgam.control.Rd
+++ b/man/vgam.control.Rd
@@ -184,7 +184,7 @@ Vector generalized additive models.
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vgam(cbind(normal, mild, severe) ~ s(let, df = 2), multinomial,
data = pneumo, trace = TRUE, eps = 1e-4, maxit = 10)
}
diff --git a/man/vglm-class.Rd b/man/vglm-class.Rd
index 10dea2e..4625e5f 100644
--- a/man/vglm-class.Rd
+++ b/man/vglm-class.Rd
@@ -197,18 +197,22 @@ a more detailed summary of the object. }
}
}
\references{
+
Yee, T. W. and Hastie, T. J. (2003)
Reduced-rank vector generalized linear models.
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
Yee, T. W. and Wild, C. J. (1996)
Vector generalized additive models.
\emph{Journal of the Royal Statistical Society, Series B, Methodological},
\bold{58}, 481--493.
+
\url{http://www.stat.auckland.ac.nz/~yee}
+
}
\author{ Thomas W. Yee }
%\note{ ~~further notes~~ }
@@ -216,14 +220,17 @@ Vector generalized additive models.
%~Make other sections like WARNING with \section{WARNING }{....} ~
\seealso{
-\code{\link{vglm}},
-\code{\link{vglmff-class}},
-\code{\link{vgam-class}}.
+ \code{\link{vglm}},
+ \code{\link{vglmff-class}},
+ \code{\link{vgam-class}}.
+
+
+
}
\examples{
# Multinomial logit model
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
}
\keyword{classes}
diff --git a/man/vglm.Rd b/man/vglm.Rd
index c50d758..c35c400 100644
--- a/man/vglm.Rd
+++ b/man/vglm.Rd
@@ -7,6 +7,7 @@
This is a very large class of models that includes
generalized linear models (GLMs) as a special case.
+
}
\usage{
vglm(formula, family, data = list(), weights = NULL, subset = NULL,
@@ -40,16 +41,32 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\code{environment(formula)}, typically the environment
from which \code{vglm} is called.
+
+
+
}
\item{weights}{
an optional vector or matrix of (prior) weights to be used
- in the fitting process. If \code{weights} is a matrix,
- then it must be in \emph{matrix-band} form, whereby the
- first \eqn{M} columns of the matrix are the diagonals,
- followed by the upper-diagonal band, followed by the
- band above that, etc. In this case, there can be up to
- \eqn{M(M+1)} columns, with the last column corresponding
- to the (1,\eqn{M}) elements of the weight matrices.
+ in the fitting process.
+ If the \pkg{VGAM} family function handles multiple responses
+ (\eqn{q > 1} of them, say) then
+ \code{weights} can be a matrix with \eqn{q} columns.
+ Each column matches the respective response.
+ If it is a vector (the usually case) then it is recycled into a
+ matrix with \eqn{q} columns.
+ The values of \code{weights} must be positive; try setting
+ a very small value such as \code{1.0e-8} to effectively
+ delete an observation.
+
+
+% If \code{weights} is a matrix,
+% then it should be must be in \emph{matrix-band} form, whereby the
+% first \eqn{M} columns of the matrix are the diagonals,
+% followed by the upper-diagonal band, followed by the
+% band above that, etc. In this case, there can be up to
+% \eqn{M(M+1)} columns, with the last column corresponding
+% to the (1,\eqn{M}) elements of the weight matrices.
+
}
\item{subset}{
@@ -57,6 +74,8 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
observations to
be used in the fitting process.
+
+
}
\item{na.action}{
a function which indicates what should happen when
@@ -74,6 +93,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
should be comparable.
Here, \code{fit} is the fitted object.
+
}
\item{mustart}{
starting values for the fitted values.
@@ -97,7 +117,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
\item{offset}{
a vector or \eqn{M}-column matrix of offset values.
These are \emph{a priori} known and are added to the
- linear predictors during fitting.
+ linear/additive predictors during fitting.
}
\item{method}{
@@ -125,6 +145,7 @@ vglm(formula, family, data = list(), weights = NULL, subset = NULL,
an optional list. See the \code{contrasts.arg}
of \code{\link{model.matrix.default}}.
+
}
\item{constraints}{
an optional list of constraint matrices.
@@ -358,6 +379,7 @@ The \code{VGAM} Package.
\code{\link{vgam}}.
Methods functions include
\code{coef.vlm},
+ \code{\link{constraints.vlm}},
\code{\link{hatvaluesvlm}},
\code{\link{predictvglm}},
\code{summary.vglm},
@@ -373,18 +395,18 @@ The \code{VGAM} Package.
print(d.AD <- data.frame(treatment = gl(3, 3),
outcome = gl(3, 1, 9),
counts = c(18,17,15,20,10,20,25,13,12)))
-vglm.D93 = vglm(counts ~ outcome + treatment, family = poissonff,
- data = d.AD, trace = TRUE)
+vglm.D93 <- vglm(counts ~ outcome + treatment, family = poissonff,
+ data = d.AD, trace = TRUE)
summary(vglm.D93)
# Example 2. Multinomial logit model
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, multinomial, pneumo)
# Example 3. Proportional odds model
-fit3 = vglm(cbind(normal,mild,severe) ~ let, propodds, pneumo, trace = TRUE)
+fit3 <- vglm(cbind(normal, mild, severe) ~ let, propodds, pneumo)
coef(fit3, matrix = TRUE)
constraints(fit3)
model.matrix(fit3, type = "lm") # LM model matrix
@@ -392,29 +414,29 @@ model.matrix(fit3) # Larger VGLM (or VLM) model matrix
# Example 4. Bivariate logistic model
-fit4 = vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
+fit4 <- vglm(cbind(nBnW, nBW, BnW, BW) ~ age, binom2.or, coalminers)
coef(fit4, matrix = TRUE)
-depvar(fit4) # Response are proportions
+depvar(fit4) # Response are proportions
weights(fit4, type = "prior")
# Example 5. The use of the xij argument (simple case).
# The constraint matrix for 'op' has one column.
-nn = 1000
+nn <- 1000
eyesdat = round(data.frame(lop = runif(nn),
rop = runif(nn),
op = runif(nn)), dig = 2)
-eyesdat = transform(eyesdat, eta1 = -1+2*lop,
- eta2 = -1+2*lop)
+eyesdat = transform(eyesdat, eta1 = -1 + 2 * lop,
+ eta2 = -1 + 2 * lop)
eyesdat = transform(eyesdat,
- leye = rbinom(nn, size = 1, prob = logit(eta1, inv = TRUE)),
- reye = rbinom(nn, size = 1, prob = logit(eta2, inv = TRUE)))
+ leye = rbinom(nn, size = 1, prob = logit(eta1, inv = TRUE)),
+ reye = rbinom(nn, size = 1, prob = logit(eta2, inv = TRUE)))
head(eyesdat)
-fit5 = vglm(cbind(leye,reye) ~ op,
- binom2.or(exchangeable = TRUE, zero = 3),
- data = eyesdat, trace = TRUE,
- xij = list(op ~ lop + rop + fill(lop)),
- form2 = ~ op + lop + rop + fill(lop))
+fit5 <- vglm(cbind(leye, reye) ~ op,
+ binom2.or(exchangeable = TRUE, zero = 3),
+ data = eyesdat, trace = TRUE,
+ xij = list(op ~ lop + rop + fill(lop)),
+ form2 = ~ op + lop + rop + fill(lop))
coef(fit5)
coef(fit5, matrix = TRUE)
constraints(fit5)
@@ -422,8 +444,8 @@ constraints(fit5)
\keyword{models}
\keyword{regression}
-%eyesdat$leye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$lop)), 1, 0)
-%eyesdat$reye = ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$rop)), 1, 0)
+%eyesdat$leye <- ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$lop)), 1, 0)
+%eyesdat$reye <- ifelse(runif(n) < 1/(1+exp(-1+2*eyesdat$rop)), 1, 0)
%coef(fit, matrix = TRUE, compress = FALSE)
@@ -440,7 +462,7 @@ constraints(fit5)
% head(poly(c(x,...), 3), length(x), drop = FALSE)
%}
%
-%fit6 = vglm(cbind(leye,reye) ~ POLY3(op), trace = TRUE,
+%fit6 = vglm(cbind(leye, reye) ~ POLY3(op), trace = TRUE,
% fam = binom2.or(exchangeable = TRUE, zero=3), data=eyesdat,
% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
% fill(POLY3(lop,rop))),
diff --git a/man/vglm.control.Rd b/man/vglm.control.Rd
index c0cd8ed..88dec65 100644
--- a/man/vglm.control.Rd
+++ b/man/vglm.control.Rd
@@ -169,6 +169,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
\emph{Statistical Modelling},
\bold{3}, 15--41.
+
}
\author{ Thomas W. Yee}
\note{
@@ -180,6 +181,7 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
predictor specific values.
These are handled using the \code{xij} argument.
+
}
% ~Make other sections like Warning with \section{Warning }{....} ~
@@ -190,27 +192,28 @@ vglm.control(checkwz = TRUE, criterion = names(.min.criterion.VGAM),
The author's homepage has further documentation about
the \code{xij} argument.
+
}
\examples{
# Example 1.
-pneumo = transform(pneumo, let = log(exposure.time))
+pneumo <- transform(pneumo, let = log(exposure.time))
vglm(cbind(normal, mild, severe) ~ let, multinomial, data = pneumo,
crit = "coef", step = 0.5, trace = TRUE, eps = 1e-8, maxit = 40)
# Example 2. The use of the xij argument (simple case).
-ymat = rdiric(n <- 1000, shape = rep(exp(2), len = 4))
-mydat = data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n),
+ymat <- rdiric(n <- 1000, shape = rep(exp(2), len = 4))
+mydat <- data.frame(x1 = runif(n), x2 = runif(n), x3 = runif(n), x4 = runif(n),
z1 = runif(n), z2 = runif(n), z3 = runif(n), z4 = runif(n))
-mydat = transform(mydat, X = x1, Z = z1)
-mydat = round(mydat, dig = 2)
-fit2 = vglm(ymat ~ X + Z,
- dirichlet(parallel = TRUE), data = mydat, trace = TRUE,
- xij = list(Z ~ z1 + z2 + z3 + z4,
- X ~ x1 + x2 + x3 + x4),
- form2 = ~ Z + z1 + z2 + z3 + z4 +
- X + x1 + x2 + x3 + x4)
+mydat <- transform(mydat, X = x1, Z = z1)
+mydat <- round(mydat, dig = 2)
+fit2 <- vglm(ymat ~ X + Z,
+ dirichlet(parallel = TRUE), data = mydat, trace = TRUE,
+ xij = list(Z ~ z1 + z2 + z3 + z4,
+ X ~ x1 + x2 + x3 + x4),
+ form2 = ~ Z + z1 + z2 + z3 + z4 +
+ X + x1 + x2 + x3 + x4)
head(model.matrix(fit2, type = "lm")) # LM model matrix
head(model.matrix(fit2, type = "vlm")) # Big VLM model matrix
coef(fit2)
@@ -227,27 +230,27 @@ plotvgam(fit2, xlab = "z1") # Correct
# Example 3. The use of the xij argument (complex case).
set.seed(123)
-coalminers = transform(coalminers,
- Age = (age - 42) / 5,
- dum1 = round(runif(nrow(coalminers)), dig = 2),
- dum2 = round(runif(nrow(coalminers)), dig = 2),
- dum3 = round(runif(nrow(coalminers)), dig = 2),
- dumm = round(runif(nrow(coalminers)), dig = 2))
-BS = function(x, ..., df = 3) bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
-NS = function(x, ..., df = 3) ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
+coalminers <- transform(coalminers,
+ Age = (age - 42) / 5,
+ dum1 = round(runif(nrow(coalminers)), dig = 2),
+ dum2 = round(runif(nrow(coalminers)), dig = 2),
+ dum3 = round(runif(nrow(coalminers)), dig = 2),
+ dumm = round(runif(nrow(coalminers)), dig = 2))
+BS <- function(x, ..., df = 3) bs(c(x,...), df = df)[1:length(x),,drop = FALSE]
+NS <- function(x, ..., df = 3) ns(c(x,...), df = df)[1:length(x),,drop = FALSE]
# Equivalently...
-BS = function(x, ..., df = 3) head(bs(c(x,...), df = df), length(x), drop = FALSE)
-NS = function(x, ..., df = 3) head(ns(c(x,...), df = df), length(x), drop = FALSE)
-
-fit3 = vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1, dum2),
- fam = binom2.or(exchangeable = TRUE, zero = 3),
- xij = list(NS(dum1, dum2) ~ NS(dum1, dum2) +
- NS(dum2, dum1) +
- fill(NS( dum1))),
- form2 = ~ NS(dum1, dum2) + NS(dum2, dum1) + fill(NS(dum1)) +
- dum1 + dum2 + dum3 + Age + age + dumm,
- data = coalminers, trace = TRUE)
+BS <- function(x, ..., df = 3) head(bs(c(x,...), df = df), length(x), drop = FALSE)
+NS <- function(x, ..., df = 3) head(ns(c(x,...), df = df), length(x), drop = FALSE)
+
+fit3 <- vglm(cbind(nBnW,nBW,BnW,BW) ~ Age + NS(dum1, dum2),
+ fam = binom2.or(exchangeable = TRUE, zero = 3),
+ xij = list(NS(dum1, dum2) ~ NS(dum1, dum2) +
+ NS(dum2, dum1) +
+ fill(NS( dum1))),
+ form2 = ~ NS(dum1, dum2) + NS(dum2, dum1) + fill(NS(dum1)) +
+ dum1 + dum2 + dum3 + Age + age + dumm,
+ data = coalminers, trace = TRUE)
head(model.matrix(fit3, type = "lm")) # LM model matrix
head(model.matrix(fit3, type = "vlm")) # Big VLM model matrix
coef(fit3)
@@ -268,13 +271,13 @@ coef(fit3, matrix = TRUE)
%# Here is one method to handle the xij argument with a term that
%# produces more than one column in the model matrix.
%# The constraint matrix for 'op' has one column.
-%POLY3 = function(x, ...) {
+%POLY3 <- function(x, ...) {
% # A cubic; ensures that the basis functions are the same.
% poly(c(x,...), 3)[1:length(x),]
%}
%
%\dontrun{
-%fit4 = vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE,
+%fit4 <- vglm(cbind(leye,reye) ~ POLY3(op), trace=TRUE,
% fam = binom2.or(exchangeable=TRUE, zero=3), data=eyesdata,
% xij = list(POLY3(op) ~ POLY3(lop,rop) + POLY3(rop,lop) +
% fill(POLY3(lop,rop))),
diff --git a/man/vglmff-class.Rd b/man/vglmff-class.Rd
index e7833ec..cb92ff4 100644
--- a/man/vglmff-class.Rd
+++ b/man/vglmff-class.Rd
@@ -53,7 +53,7 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
At present only a very few \pkg{VGAM} family functions have this
feature implemented.
Those that do do not require specifying the \code{Musual}
- argument when used with \code{\link{rcam}}.
+ argument when used with \code{\link{rcim}}.
}
@@ -87,6 +87,10 @@ Objects can be created by calls of the form \code{new("vglmff", ...)}.
given the fitted values, returns the linear/additive predictors.
If present, the function must have arguments
\code{function(mu, extra = NULL)}.
+ Most \pkg{VGAM} family functions do not have
+ a \code{linkfun} function. They largely are for
+ classical exponential families, i.e., GLMs.
+
}
\item{\code{loglikelihood}:}{
@@ -250,13 +254,13 @@ The file is amongst other \pkg{VGAM} PDF documentation.
\code{\link{vglm}},
\code{\link{vgam}},
\code{\link{rrvglm}},
- \code{\link{rcam}}.
+ \code{\link{rcim}}.
}
\examples{
cratio()
cratio(link = "cloglog")
-cratio(link = cloglog, reverse = TRUE)
+cratio(link = "cloglog", reverse = TRUE)
}
\keyword{classes}
diff --git a/man/vonmises.Rd b/man/vonmises.Rd
index ad7ffad..d61db46 100644
--- a/man/vonmises.Rd
+++ b/man/vonmises.Rd
@@ -7,10 +7,8 @@
von Mises distribution by maximum likelihood estimation.
}
\usage{
-vonmises(llocation = "elogit", lscale = "loge",
- elocation = if (llocation == "elogit") list(min = 0, max = 2 * pi) else
- list(), escale = list(), ilocation = NULL,
- iscale = NULL, imethod = 1, zero = NULL)
+vonmises(llocation = elogit(min = 0, max = 2 * pi), lscale = "loge",
+ ilocation = NULL, iscale = NULL, imethod = 1, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -21,11 +19,6 @@ vonmises(llocation = "elogit", lscale = "loge",
For \eqn{k}, a log link is the default because the parameter is positive.
}
- \item{elocation, escale}{
- List. Extra argument for each of the link functions.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{ilocation}{
Initial value for the location \eqn{a} parameter.
By default, an initial value is chosen internally using
@@ -84,6 +77,7 @@ vonmises(llocation = "elogit", lscale = "loge",
and
\eqn{\eta_2=\log(k)}{eta2=log(k)} for this family function.
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
@@ -91,8 +85,10 @@ vonmises(llocation = "elogit", lscale = "loge",
\code{\link{rrvglm}}
and \code{\link{vgam}}.
+
}
\references{
+
Evans, M., Hastings, N. and Peacock, B. (2000)
\emph{Statistical Distributions},
New York: Wiley-Interscience, Third edition.
@@ -121,18 +117,20 @@ New York: Wiley-Interscience, Third edition.
\code{\link[base]{Bessel}},
\code{\link{cardioid}}.
+
\pkg{CircStats} and \pkg{circular} currently have a lot more
R functions for circular data than the \pkg{VGAM} package.
+
}
\examples{
-vdata = data.frame(x2 = runif(nn <- 1000))
-vdata = transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!!
-fit = vglm(y ~ x2, vonmises(zero = 2), vdata, trace = TRUE)
+vdata <- data.frame(x2 = runif(nn <- 1000))
+vdata <- transform(vdata, y = rnorm(nn, m = 2+x2, sd = exp(0.2))) # Bad data!!
+fit <- vglm(y ~ x2, vonmises(zero = 2), vdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit)
-with(vdata, range(y)) # Original data
-range(depvar(fit)) # Processed data is in [0,2*pi)
+with(vdata, range(y)) # Original data
+range(depvar(fit)) # Processed data is in [0,2*pi)
}
\keyword{models}
\keyword{regression}
diff --git a/man/vsmooth.spline.Rd b/man/vsmooth.spline.Rd
index cfbb249..58fcfd5 100644
--- a/man/vsmooth.spline.Rd
+++ b/man/vsmooth.spline.Rd
@@ -6,8 +6,9 @@
Fits a vector cubic smoothing spline.
}
\usage{
-vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL, all.knots = FALSE,
- iconstraint = diag(M), xconstraint = diag(M),
+vsmooth.spline(x, y, w = NULL, df = rep(5, M), spar = NULL,
+ all.knots = FALSE, iconstraint = diag(M),
+ xconstraint = diag(M),
constraints = list("(Intercepts)" = diag(M), x = diag(M)),
var.arg = FALSE, scale.w = TRUE, nk = NULL,
control.spar = list())
@@ -172,32 +173,33 @@ Heidelberg: Physica-Verlag.
}
\examples{
-nn = 20; x = 2 + 5*(nn:1)/nn
-x[2:4] = x[5:7] # Allow duplication
-y1 = sin(x) + rnorm(nn, sd = 0.13)
-y2 = cos(x) + rnorm(nn, sd = 0.13)
-y3 = 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
-y = cbind(y1, y2, y3)
-ww = cbind(rep(3,nn), 4, (1:nn)/nn)
-
-(fit = vsmooth.spline(x, y, w = ww, df = 5))
+nn <- 20; x <- 2 + 5*(nn:1)/nn
+x[2:4] <- x[5:7] # Allow duplication
+y1 <- sin(x) + rnorm(nn, sd = 0.13)
+y2 <- cos(x) + rnorm(nn, sd = 0.13)
+y3 <- 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
+y <- cbind(y1, y2, y3)
+ww <- cbind(rep(3, nn), 4, (1:nn)/nn)
+
+(fit <- vsmooth.spline(x, y, w = ww, df = 5))
\dontrun{
plot(fit) # The 1st and 3rd functions do not differ by a constant
}
-mat = matrix(c(1,0,1, 0,1,0), 3, 2)
-(fit2 = vsmooth.spline(x, y, w = ww, df = 5, iconstr = mat, xconstr = mat))
+mat <- matrix(c(1,0,1, 0,1,0), 3, 2)
+(fit2 <- vsmooth.spline(x, y, w = ww, df = 5, iconstr = mat, xconstr = mat))
# The 1st and 3rd functions do differ by a constant:
-mycols = c("orange", "blue", "orange")
+mycols <- c("orange", "blue", "orange")
\dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) }
-p = predict(fit, x = fit at x, deriv = 0)
+
+p <- predict(fit, x = model.matrix(fit, type = "lm"), deriv = 0)
max(abs(fit at y - with(p, y))) # Should be zero
-par(mfrow = c(3, 1))
-ux = seq(1, 8, len = 100)
+par(mfrow <- c(3, 1))
+ux <- seq(1, 8, len = 100)
for(d in 1:3) {
- p = predict(fit, x = ux, deriv = d)
+ p <- predict(fit, x = ux, deriv = d)
\dontrun{with(p, matplot(x, y, type = "l", main = paste("deriv =", d),
lwd = 2, ylab = "", cex.axis = 1.5,
cex.lab = 1.5, cex.main = 1.5)) }
diff --git a/man/waitakere.Rd b/man/waitakere.Rd
index 09bbde2..e99fe19 100644
--- a/man/waitakere.Rd
+++ b/man/waitakere.Rd
@@ -52,8 +52,8 @@
}
\examples{
-fit = vgam(agaaus ~ s(altitude, df = 2), binomialff, waitakere)
+fit <- vgam(agaaus ~ s(altitude, df = 2), binomialff, waitakere)
head(predict(fit, waitakere, type = "response"))
-\dontrun{ plot(fit, se = TRUE, lcol = "red", scol = "blue") }
+\dontrun{ plot(fit, se = TRUE, lcol = "orange", scol = "blue") }
}
\keyword{datasets}
diff --git a/man/wald.Rd b/man/wald.Rd
index c278c79..1ccc35a 100644
--- a/man/wald.Rd
+++ b/man/wald.Rd
@@ -8,13 +8,12 @@ by maximum likelihood estimation.
}
\usage{
-wald(link.lambda = "loge", earg = list(), init.lambda = NULL)
+wald(link.lambda = "loge", init.lambda = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.lambda, earg}{
- Parameter link function and its extra argument for
- the \eqn{\lambda}{lambda} parameter.
+ \item{link.lambda}{
+ Parameter link function for the \eqn{\lambda}{lambda} parameter.
See \code{\link{Links}} for more choices and general information.
}
diff --git a/man/weibull.Rd b/man/weibull.Rd
index c8da7af..e1b2df3 100644
--- a/man/weibull.Rd
+++ b/man/weibull.Rd
@@ -12,8 +12,8 @@
}
\usage{
weibull(lshape = "loge", lscale = "loge",
- eshape = list(), escale = list(),
- ishape = NULL, iscale = NULL, nrfs = 1, imethod = 1, zero = 2)
+ ishape = NULL, iscale = NULL, nrfs = 1,
+ probs.y = c(0.2, 0.5, 0.8), imethod = 1, zero = -2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -23,16 +23,12 @@ weibull(lshape = "loge", lscale = "loge",
(positive) scale parameter (called \eqn{b} below).
See \code{\link{Links}} for more choices.
- }
- \item{eshape, escale}{
- Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
}
- Parameter link functions applied to the
\item{ishape, iscale}{
Optional initial values for the shape and scale parameters.
+
}
\item{nrfs}{
Currently this argument is ignored.
@@ -42,6 +38,7 @@ weibull(lshape = "loge", lscale = "loge",
The default value uses a mixture of the two algorithms, and retaining
positive-definite working weights.
+
}
\item{imethod}{
Initialization method used if there are censored observations.
@@ -49,11 +46,8 @@ weibull(lshape = "loge", lscale = "loge",
}
- \item{zero}{
- An integer specifying which linear/additive predictor is to be modelled
- as an intercept only. The value must be from the set \{1,2\},
- which correspond to the shape and scale parameters respectively.
- Setting \code{zero = NULL} means none of them.
+ \item{zero, probs.y}{
+ Details at \code{\link{CommonVGAMffArguments}}.
}
}
@@ -78,14 +72,33 @@ weibull(lshape = "loge", lscale = "loge",
This \pkg{VGAM} family function currently does not handle
censored data.
Fisher scoring is used to estimate the two parameters.
- Although the Fisher information matrices used here are valid
+ Although the expected information matrices used here are valid
in all regions of the parameter space,
the regularity conditions for maximum
likelihood estimation are satisfied only if \eqn{a>2}
(according to Kleiber and Kotz (2003)).
If this is violated then a warning message is issued.
- One can enforce \eqn{a>2} by choosing \code{lshape = "logoff"}
- and \code{eshape = list(offset = -2)}.
+ One can enforce \eqn{a>2} by
+ choosing \code{lshape = logoff(offset = -2)}.
+ Common values of the shape parameter lie between 0.5 and 3.5.
+
+
+ Summarized in Harper et al. (2011),
+ for inference, there are 4 cases to consider.
+ If \eqn{a \leq 1} then the MLEs are not consistent
+ (and the smallest observation becomes a hyperefficient
+ solution for the location parameter in the 3-parameter case).
+ If \eqn{1 < a < 2} then MLEs exist but are not asymptotically normal.
+ If \eqn{a = 2} then the MLEs exist and are normal and asymptotically
+ efficient but with a slower convergence rate than when \eqn{a > 2}.
+ If \eqn{a > 2} then MLEs have classical asymptotic properties.
+
+
+ The 3-parameter (location is the third parameter) Weibull can
+ be estimated by maximizing a profile log-likelihood (see,
+ e.g., Harper et al. (2011) and Lawless (2003)), else try
+ \code{\link{gev}} which is a better parameterization.
+
}
@@ -107,6 +120,13 @@ Johnson, N. L. and Kotz, S. and Balakrishnan, N. (1994)
2nd edition, Volume 1, New York: Wiley.
+Lawless, J. F. (2003)
+\emph{Statistical Models and Methods for Lifetime Data},
+2nd ed.
+{Hoboken, NJ, USA: John Wiley & Sons}.
+
+
+
Rinne, Horst. (2009)
\emph{The Weibull Distribution: A Handbook}.
Boca Raton, FL, USA: CRC Press.
@@ -121,6 +141,15 @@ Weibull and GE distributions,
3130--3144.
+Harper, W. V. and Eschenbach, T. G. and James, T. R. (2011)
+Concerns about Maximum Likelihood Estimation for
+ the Three-Parameter {W}eibull Distribution:
+ Case Study of Statistical Software,
+\emph{The American Statistician},
+\bold{65(1)},
+{44--54}.
+
+
Smith, R. L. (1985)
Maximum likelihood estimation in a class of nonregular cases.
\emph{Biometrika}, \bold{72}, 67--90.
@@ -142,8 +171,11 @@ Weibull and GE distributions,
make use the two initial value arguments.
+ This \pkg{VGAM} family function handles multiple responses.
+
+
The Weibull distribution is often an alternative to the lognormal
- distribution. The inverse Weibull distribution, which is that of
+ distribution. The inverse Weibull distribution, which is that of
\eqn{1/Y} where \eqn{Y} has a Weibull(\eqn{a,b}) distribution, is
known as the log-Gompertz distribution.
@@ -168,6 +200,7 @@ Weibull and GE distributions,
If the shape parameter is less than two then misleading inference may
result, e.g., in the \code{summary} and \code{vcov} of the object.
+
}
\seealso{
@@ -175,14 +208,16 @@ Weibull and GE distributions,
\code{\link{gev}},
\code{\link{lognormal}},
\code{\link{expexp}}.
+ \code{\link{gumbelII}}.
}
\examples{
-# Complete data
-wdata = data.frame(x2 = runif(nn <- 1000))
-wdata = transform(wdata, y = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)))
-fit = vglm(y ~ x2, weibull, wdata, trace = TRUE)
+wdata <- data.frame(x2 = runif(nn <- 1000)) # Complete data
+wdata <- transform(wdata,
+ y1 = rweibull(nn, shape = exp(1 + x2), scale = exp(-2)),
+ y2 = rweibull(nn, shape = exp(2 - x2), scale = exp( 1)))
+fit <- vglm(cbind(y1, y2) ~ x2, weibull, wdata, trace = TRUE)
coef(fit, matrix = TRUE)
vcov(fit)
summary(fit)
diff --git a/man/weightsvglm.Rd b/man/weightsvglm.Rd
index 2506732..53b5fa8 100644
--- a/man/weightsvglm.Rd
+++ b/man/weightsvglm.Rd
@@ -115,27 +115,28 @@ weightsvglm(object, type = c("prior", "working"),
\code{\link{vglmff-class}},
\code{\link{vglm}}.
+
}
\examples{
-pneumo = transform(pneumo, let = log(exposure.time))
-(fit = vglm(cbind(normal, mild, severe) ~ let,
+pneumo <- transform(pneumo, let = log(exposure.time))
+(fit <- vglm(cbind(normal, mild, severe) ~ let,
cumulative(parallel = TRUE, reverse = TRUE), pneumo))
-depvar(fit) # These are sample proportions
+depvar(fit) # These are sample proportions
weights(fit, type = "prior", matrix = FALSE) # Number of observations
# Look at the working residuals
-nn = nrow(model.matrix(fit, type = "lm"))
-M = ncol(predict(fit))
+nn <- nrow(model.matrix(fit, type = "lm"))
+M <- ncol(predict(fit))
-temp = weights(fit, type = "working", deriv = TRUE)
-wz = m2adefault(temp$weights, M = M) # In array format
-wzinv = array(apply(wz, 3, solve), c(M, M, nn))
-wresid = matrix(NA, nn, M) # Working residuals
+temp <- weights(fit, type = "working", deriv = TRUE)
+wz <- m2adefault(temp$weights, M = M) # In array format
+wzinv <- array(apply(wz, 3, solve), c(M, M, nn))
+wresid <- matrix(NA, nn, M) # Working residuals
for(ii in 1:nn)
- wresid[ii,] = wzinv[, , ii, drop = TRUE] \%*\% temp$deriv[ii, ]
-max(abs(c(resid(fit, type = "w")) - c(wresid))) # Should be 0
+ wresid[ii,] <- wzinv[, , ii, drop = TRUE] \%*\% temp$deriv[ii, ]
+max(abs(c(resid(fit, type = "work")) - c(wresid))) # Should be 0
-(z <- predict(fit) + wresid) # Adjusted dependent vector
+(zedd <- predict(fit) + wresid) # Adjusted dependent vector
}
\keyword{models}
\keyword{regression}
diff --git a/man/wffc.P2star.Rd b/man/wffc.P2star.Rd
index 350f13f..1e83dc7 100644
--- a/man/wffc.P2star.Rd
+++ b/man/wffc.P2star.Rd
@@ -79,7 +79,7 @@ wffc.P3star(length, c1 = 100, min.eligible = 0.18, ppm = 2000)
}
\seealso{ \code{\link{wffc}}. }
\examples{
-\dontrun{ fishlength = seq(0.0, 0.72, by = 0.001)
+\dontrun{ fishlength <- seq(0.0, 0.72, by = 0.001)
plot(fishlength, wffc.P2star(fishlength), type = "l", col = "blue",
las = 1, lty = "dashed", lwd = 2, las = 1, cex.main = 0.8,
xlab = "Fish length (m)", ylab = "Competition points",
diff --git a/man/wffc.Rd b/man/wffc.Rd
index 75a0f68..fc88123 100644
--- a/man/wffc.Rd
+++ b/man/wffc.Rd
@@ -172,16 +172,16 @@ summary(wffc)
with(wffc, table(water, session))
# Obtain some simple plots
-waihou = subset(wffc, water == "Waihou")
-waimak = subset(wffc, water == "Waimakariri")
-whang = subset(wffc, water == "Whanganui")
-otam = subset(wffc, water == "Otamangakau")
-roto = subset(wffc, water == "Rotoaira")
-minlength = min(wffc[,"length"])
-maxlength = max(wffc[,"length"])
-nwater = c("Waihou" = nrow(waihou), "Waimakariri" = nrow(waimak),
- "Whanganui" = nrow(whang), "Otamangakau" = nrow(otam),
- "Rotoaira" = nrow(roto))
+waihou <- subset(wffc, water == "Waihou")
+waimak <- subset(wffc, water == "Waimakariri")
+whang <- subset(wffc, water == "Whanganui")
+otam <- subset(wffc, water == "Otamangakau")
+roto <- subset(wffc, water == "Rotoaira")
+minlength <- min(wffc[,"length"])
+maxlength <- max(wffc[,"length"])
+nwater <- c("Waihou" = nrow(waihou), "Waimakariri" = nrow(waimak),
+ "Whanganui" = nrow(whang), "Otamangakau" = nrow(otam),
+ "Rotoaira" = nrow(roto))
\dontrun{
par(mfrow = c(2,3), las = 1)
# Overall distribution of length
diff --git a/man/wffc.indiv.Rd b/man/wffc.indiv.Rd
index 61a4ade..744a283 100644
--- a/man/wffc.indiv.Rd
+++ b/man/wffc.indiv.Rd
@@ -31,6 +31,7 @@
See also \code{\link{wffc}} and \code{\link{wffc.teams}} for more
details and links.
+
}
%\source{
% \url{http://www.2008worldflyfishingchamps.com/}.
@@ -41,6 +42,7 @@
\emph{Fisheries Research},
\bold{101}, 116--126.
+
}
\examples{
summary(wffc.indiv)
diff --git a/man/xs.nz.Rd b/man/xs.nz.Rd
index 01bcab8..41a3cc6 100644
--- a/man/xs.nz.Rd
+++ b/man/xs.nz.Rd
@@ -110,7 +110,7 @@
}
\item{\code{acne}}{a numeric vector, have you ever
- received treatment from a doctor for acne?
+ received treatment from a doctor for acne (pimples)?
}
@@ -404,17 +404,15 @@ conceivable that some participants had poor reading skills!
}
-
-
\examples{
data(xs.nz)
summary(xs.nz)
# Handling of factors requires care
is.factor(xs.nz$babies) # TRUE
-summary(xs.nz$babies) # Note the "-"s
+summary(xs.nz$babies) # Note the "-"s
charbabies <- as.character(xs.nz$babies)
summary(as.numeric(charbabies)) # "-"s converted to NAs + warning
-table(as.numeric(charbabies)) # Ditto
+table(as.numeric(charbabies)) # Ditto
}
\keyword{datasets}
diff --git a/man/yeo.johnson.Rd b/man/yeo.johnson.Rd
index 33c6116..d49cec4 100644
--- a/man/yeo.johnson.Rd
+++ b/man/yeo.johnson.Rd
@@ -71,12 +71,12 @@ Quantile regression via vector generalized additive models.
}
\examples{
-y = seq(-4, 4, len = (nn <- 200))
-ltry = c(0, 0.5, 1, 1.5, 2) # Try these values of lambda
-lltry = length(ltry)
-psi = matrix(as.numeric(NA), nn, lltry)
+y <- seq(-4, 4, len = (nn <- 200))
+ltry <- c(0, 0.5, 1, 1.5, 2) # Try these values of lambda
+lltry <- length(ltry)
+psi <- matrix(as.numeric(NA), nn, lltry)
for(ii in 1:lltry)
- psi[,ii] = yeo.johnson(y, lambda = ltry[ii])
+ psi[,ii] <- yeo.johnson(y, lambda = ltry[ii])
\dontrun{
matplot(y, psi, type = "l", ylim = c(-4, 4), lwd = 2, lty = 1:lltry,
diff --git a/man/yip88.Rd b/man/yip88.Rd
index 4934fb4..ca52e03 100644
--- a/man/yip88.Rd
+++ b/man/yip88.Rd
@@ -7,11 +7,11 @@
}
\usage{
-yip88(link.lambda = "loge", n.arg = NULL)
+yip88(link = "loge", n.arg = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link.lambda}{
+ \item{link}{
Link function for the usual \eqn{\lambda}{lambda} parameter.
See \code{\link{Links}} for more choices.
@@ -115,32 +115,32 @@ model.
}
\examples{
-phi = 0.35; lambda = 2 # Generate some artificial data
-y = rzipois(n <- 1000, lambda, phi)
+phi <- 0.35; lambda <- 2 # Generate some artificial data
+y <- rzipois(n <- 1000, lambda, phi)
table(y)
# Two equivalent ways of fitting the same model
-fit1 = vglm(y ~ 1, yip88(n = length(y)), subset = y > 0, trace = TRUE)
-fit2 = vglm(y ~ 1, yip88, trace = TRUE, crit = "c")
-(true.mean = (1-phi) * lambda)
+fit1 <- vglm(y ~ 1, yip88(n = length(y)), subset = y > 0)
+fit2 <- vglm(y ~ 1, yip88, trace = TRUE, crit = "coef")
+(true.mean <- (1-phi) * lambda)
mean(y)
head(fitted(fit1))
-fit1 at misc$phi # The estimate of phi
+fit1 at misc$pstr0 # The estimate of phi
# Compare the ZIP with the positive Poisson distribution
-pp = vglm(y ~ 1, pospoisson, subset = y > 0, trace = TRUE, crit = "c")
+pp <- vglm(y ~ 1, pospoisson, subset = y > 0, crit = "c")
coef(pp)
Coef(pp)
-coef(fit1) - coef(pp) # Same
-head(fitted(fit1) - fitted(pp)) # Different
+coef(fit1) - coef(pp) # Same
+head(fitted(fit1) - fitted(pp)) # Different
# Another example (Angers and Biswas, 2003) ---------------------
-abdata = data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
-abdata = subset(abdata, w > 0)
+abdata <- data.frame(y = 0:7, w = c(182, 41, 12, 2, 2, 0, 0, 1))
+abdata <- subset(abdata, w > 0)
-yy = with(abdata, rep(y, w))
-fit3 = vglm(yy ~ 1, yip88(n = length(yy)), subset = yy > 0, trace = TRUE)
-fit3 at misc$phi # Estimate of phi (they get 0.5154 with SE 0.0707)
+yy <- with(abdata, rep(y, w))
+fit3 <- vglm(yy ~ 1, yip88(n = length(yy)), subset = yy > 0)
+fit3 at misc$pstr0 # Estimate of phi (they get 0.5154 with SE 0.0707)
coef(fit3, matrix = TRUE)
Coef(fit3) # Estimate of lambda (they get 0.6997 with SE 0.1520)
head(fitted(fit3))
diff --git a/man/yulesimon.Rd b/man/yulesimon.Rd
index 1013e00..e91124e 100644
--- a/man/yulesimon.Rd
+++ b/man/yulesimon.Rd
@@ -7,14 +7,15 @@
}
\usage{
-yulesimon(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
+yulesimon(link = "loge", irho = NULL, nsimEIM = 200, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg}{
- Link function and extra argument for the \eqn{\rho}{rho} parameter.
+ \item{link}{
+ Link function for the \eqn{\rho}{rho} parameter.
See \code{\link{Links}} for more choices and for general information.
+
}
\item{irho}{
Optional initial value for the (positive) parameter.
@@ -22,10 +23,12 @@ yulesimon(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
The default is to obtain an initial value internally. Use this argument
if the default fails.
+
}
- \item{nsimEIM}{
+ \item{nsimEIM, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -43,12 +46,21 @@ yulesimon(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
\eqn{\rho^2/((\rho-1)^2 (\rho-2))}{rho^2/((rho-1)^2 (rho-2))}
provided \eqn{\rho > 2}{rho > 2}.
+
+
+ The distribution was named after Udny Yule and Herbert A. Simon.
+ Simon originally called it the Yule distribution.
+ This family function can handle multiple responses.
+
+
+
}
\value{
An object of class \code{"vglmff"} (see \code{\link{vglmff-class}}).
The object is used by modelling functions such as \code{\link{vglm}}
and \code{\link{vgam}}.
+
}
\references{
@@ -58,20 +70,21 @@ yulesimon(link = "loge", earg = list(), irho = NULL, nsimEIM = 200)
\bold{42},
425--440.
+
}
\author{ T. W. Yee }
%\note{
%}
\seealso{
- \code{\link{ryules}}.
+ \code{\link{ryules}}.
}
\examples{
-ydata = data.frame(x2 = runif(nn <- 1000))
-ydata = transform(ydata, y = ryules(nn, rho = exp(1.5-x2)))
+ydata <- data.frame(x2 = runif(nn <- 1000))
+ydata <- transform(ydata, y = ryules(nn, rho = exp(1.5 - x2)))
with(ydata, table(y))
-fit = vglm(y ~ x2, yulesimon, ydata, trace = TRUE)
+fit <- vglm(y ~ x2, yulesimon, ydata, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
}
diff --git a/man/yulesimonUC.Rd b/man/yulesimonUC.Rd
index 024f713..54c495a 100644
--- a/man/yulesimonUC.Rd
+++ b/man/yulesimonUC.Rd
@@ -66,7 +66,7 @@ ryules(20, 2.1)
round(1000 * dyules(1:8, 2))
table(ryules(1000, 2))
-\dontrun{ x = 0:6
+\dontrun{ x <- 0:6
plot(x, dyules(x, rho = 2.2), type = "h", las = 1, col = "blue") }
}
\keyword{distribution}
diff --git a/man/zabinomUC.Rd b/man/zabinomUC.Rd
index f54cd20..0bcf65c 100644
--- a/man/zabinomUC.Rd
+++ b/man/zabinomUC.Rd
@@ -65,11 +65,11 @@ rzabinom(n, size, prob, pobs0 = 0)
}
\examples{
-size <- 10; prob = 0.15; pobs0 <- 0.05; x <- (-1):7
+size <- 10; prob <- 0.15; pobs0 <- 0.05; x <- (-1):7
dzabinom(x, size = size, prob = prob, pobs0 = pobs0)
table(rzabinom(100, size = size, prob = prob, pobs0 = pobs0))
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
barplot(rbind(dzabinom(x, size = size, prob = prob, pobs0 = pobs0),
dbinom(x, size = size, prob = prob)),
beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
diff --git a/man/zabinomial.Rd b/man/zabinomial.Rd
index 1d069b0..7b7db32 100644
--- a/man/zabinomial.Rd
+++ b/man/zabinomial.Rd
@@ -9,9 +9,8 @@
}
\usage{
-zabinomial(lprob = "logit", eprob = list(),
- lpobs0 = "logit", epobs0 = list(),
- iprob = NULL, ipobs0 = NULL,
+zabinomial(lprob = "logit", lpobs0 = "logit",
+ iprob = NULL, ipobs0 = NULL,
imethod = 1, zero = 2)
}
%- maybe also 'usage' for other objects documented here.
@@ -27,11 +26,6 @@ zabinomial(lprob = "logit", eprob = list(),
See \code{\link{Links}} for more choices.
}
- \item{eprob, epobs0}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{iprob, ipobs0}{
\code{\link{CommonVGAMffArguments}}.
diff --git a/man/zageomUC.Rd b/man/zageomUC.Rd
index 1e89c9e..9f02f39 100644
--- a/man/zageomUC.Rd
+++ b/man/zageomUC.Rd
@@ -65,11 +65,11 @@ rzageom(n, prob, pobs0 = 0)
}
\examples{
-prob = 0.35; pobs0 <- 0.05; x <- (-1):7
+prob <- 0.35; pobs0 <- 0.05; x <- (-1):7
dzageom(x, prob = prob, pobs0 = pobs0)
table(rzageom(100, prob = prob, pobs0 = pobs0))
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
barplot(rbind(dzageom(x, prob = prob, pobs0 = pobs0),
dgeom(x, prob = prob)),
beside = TRUE, col = c("blue", "orange"), cex.main = 0.7, las = 1,
diff --git a/man/zageometric.Rd b/man/zageometric.Rd
index 303adf7..d3b392a 100644
--- a/man/zageometric.Rd
+++ b/man/zageometric.Rd
@@ -9,9 +9,8 @@
}
\usage{
-zageometric(lpobs0 = "logit", lprob = "logit",
- epobs0 = list(), eprob = list(),
- imethod = 1, ipobs0 = NULL, iprob = NULL, zero = NULL)
+zageometric(lpobs0 = "logit", lprob = "logit", imethod = 1,
+ ipobs0 = NULL, iprob = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
@@ -29,11 +28,13 @@ zageometric(lpobs0 = "logit", lprob = "logit",
See \code{\link{Links}} for more choices.
}
- \item{epobs0, eprob}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{epobs0, eprob}{
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% epobs0 = list(), eprob = list(),
+% }
+
\item{ipobs0, iprob}{
Optional initial values for the parameters.
If given, they must be in range.
@@ -59,6 +60,7 @@ zageometric(lpobs0 = "logit", lprob = "logit",
is implemented in the \pkg{VGAM} package. Some people
call the zero-altered geometric a \emph{hurdle} model.
+
The input can be a matrix.
By default, the two linear/additive
predictors are \eqn{(\log(\phi), logit(p))^T}{(log(phi), logit(prob))^T}.
@@ -120,6 +122,7 @@ zageometric(lpobs0 = "logit", lprob = "logit",
\code{\link[stats:Geometric]{dgeom}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
diff --git a/man/zanegbinUC.Rd b/man/zanegbinUC.Rd
index dcef65f..25f1b03 100644
--- a/man/zanegbinUC.Rd
+++ b/man/zanegbinUC.Rd
@@ -68,7 +68,7 @@ munb <- 3; size <- 4; pobs0 <- 0.3; x <- (-1):7
dzanegbin(x, munb = munb, size = size, pobs0 = pobs0)
table(rzanegbin(100, munb = munb, size = size, pobs0 = pobs0))
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
barplot(rbind(dzanegbin(x, munb = munb, size = size, pobs0 = pobs0),
dnbinom(x, mu = munb, size = size)),
beside = TRUE, col = c("blue","green"), cex.main = 0.7, las = 1,
diff --git a/man/zanegbinomial.Rd b/man/zanegbinomial.Rd
index 2d778f5..8b1fd1a 100644
--- a/man/zanegbinomial.Rd
+++ b/man/zanegbinomial.Rd
@@ -10,7 +10,6 @@
}
\usage{
zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
- epobs0 = list(), emunb = list(), esize = list(),
ipobs0 = NULL, isize = NULL,
zero = c(-1, -3), imethod = 1,
nsimEIM = 250, shrinkage.init = 0.95)
@@ -35,11 +34,13 @@ zanegbinomial(lpobs0 = "logit", lmunb = "loge", lsize = "loge",
See \code{\link{Links}} for more choices.
}
- \item{epobs0, emunb, esize}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{epobs0, emunb, esize}{
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% epobs0 = list(), emunb = list(), esize = list(),
+% }
+
\item{ipobs0, isize}{
Optional initial values for \eqn{p_0}{pobs0} and \code{k}.
If given, it is okay to give one value
@@ -121,6 +122,11 @@ for counts with extra zeros.
strongly on providing good initial values.
+ This \pkg{VGAM} family function is computationally expensive
+ and usually runs slowly;
+ setting \code{trace = TRUE} is useful for monitoring convergence.
+
+
Inference obtained from \code{summary.vglm} and \code{summary.vgam}
may or may not be correct. In particular, the p-values, standard errors
and degrees of freedom may need adjustment. Use simulation on artificial
@@ -163,6 +169,7 @@ for counts with extra zeros.
}
\examples{
+\dontrun{
zdata <- data.frame(x2 = runif(nn <- 2000))
zdata <- transform(zdata, pobs0 = logit(-1 + 2*x2, inverse = TRUE))
zdata <- transform(zdata,
@@ -176,6 +183,7 @@ coef(fit, matrix = TRUE)
head(fitted(fit))
head(predict(fit))
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/zapoisUC.Rd b/man/zapoisUC.Rd
index 49717ba..1c12c03 100644
--- a/man/zapoisUC.Rd
+++ b/man/zapoisUC.Rd
@@ -56,14 +56,14 @@ rzapois(n, lambda, pobs0 = 0)
}
\examples{
-lambda = 3; pobs0 = 0.2; x = (-1):7
-(ii = dzapois(x, lambda, pobs0))
-max(abs(cumsum(ii) - pzapois(x, lambda, pobs0))) # Should be 0
+lambda <- 3; pobs0 <- 0.2; x <- (-1):7
+(ii <- dzapois(x, lambda, pobs0))
+max(abs(cumsum(ii) - pzapois(x, lambda, pobs0))) # Should be 0
table(rzapois(100, lambda, pobs0))
table(qzapois(runif(100), lambda, pobs0))
round(dzapois(0:10, lambda, pobs0) * 100) # Should be similar
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
barplot(rbind(dzapois(x, lambda, pobs0), dpois(x, lambda)),
beside = TRUE, col = c("blue", "green"), las = 1,
main = paste("ZAP(", lambda, ", pobs0 = ", pobs0, ") [blue] vs",
diff --git a/man/zapoisson.Rd b/man/zapoisson.Rd
index ca03364..d1d19f5 100644
--- a/man/zapoisson.Rd
+++ b/man/zapoisson.Rd
@@ -9,8 +9,7 @@
}
\usage{
-zapoisson(lpobs0 = "logit", llambda = "loge", epobs0 = list(),
- elambda = list(), zero = NULL)
+zapoisson(lpobs0 = "logit", llambda = "loge", zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,11 +23,15 @@ zapoisson(lpobs0 = "logit", llambda = "loge", epobs0 = list(),
See \code{\link{Links}} for more choices.
}
- \item{epobs0, elambda}{
- Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+
+% \item{epobs0, elambda}{
+% epobs0 = list(), elambda = list(),
+% Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
+
\item{zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
diff --git a/man/zero.Rd b/man/zero.Rd
index 7d8997c..7b1ae01 100644
--- a/man/zero.Rd
+++ b/man/zero.Rd
@@ -101,7 +101,8 @@ args(binom2.or)
args(gpd)
#LMS quantile regression example
-fit = vglm(BMI ~ bs(age, df = 4), lms.bcg(zero = c(1,3)), bmi.nz, trace = TRUE)
+fit <- vglm(BMI ~ bs(age, df = 4), lms.bcg(zero = c(1,3)),
+ bmi.nz, trace = TRUE)
coef(fit, matrix = TRUE)
}
\keyword{models}
diff --git a/man/zeta.Rd b/man/zeta.Rd
index 9c5dd8b..d668ffb 100644
--- a/man/zeta.Rd
+++ b/man/zeta.Rd
@@ -56,6 +56,8 @@ zeta(x, deriv = 0)
% The derivative is attached as an attribute zz.
+
+
}
\references{
@@ -107,20 +109,20 @@ curve(zeta, -13, 0.8, xlim = c(-12, 10), ylim = c(-1, 4), col = "orange")
curve(zeta, 1.2, 12, add = TRUE, col = "orange")
abline(v = 0, h = c(0, 1), lty = "dashed")
-curve(zeta, -14, -0.4, col = "orange") # Close up plot
+curve(zeta, -14, -0.4, col = "orange") # Close up plot
abline(v = 0, h = 0, lty = "dashed")
-x = seq(0.04, 0.8, len = 100) # Plot of the first derivative
+x <- seq(0.04, 0.8, len = 100) # Plot of the first derivative
plot(x, zeta(x, deriv = 1), type = "l", las = 1, col = "blue",
xlim = c(0.04, 3), ylim = c(-6, 0), main = "zeta'(x)")
-x = seq(1.2, 3, len = 100)
+x <- seq(1.2, 3, len = 100)
lines(x, zeta(x, deriv = 1), col = "blue")
abline(v = 0, h = 0, lty = "dashed") }
-zeta(2) - pi^2 / 6 # Should be zero
-zeta(4) - pi^4 / 90 # Should be zero
-zeta(6) - pi^6 / 945 # Should be 0
-zeta(8) - pi^8 / 9450 # Should be 0
+zeta(2) - pi^2 / 6 # Should be zero
+zeta(4) - pi^4 / 90 # Should be zero
+zeta(6) - pi^6 / 945 # Should be 0
+zeta(8) - pi^8 / 9450 # Should be 0
# zeta(0, deriv = 1) + 0.5 * log(2*pi) # Should be 0
}
\keyword{math}
diff --git a/man/zetaff.Rd b/man/zetaff.Rd
index 255a2ff..bd0d549 100644
--- a/man/zetaff.Rd
+++ b/man/zetaff.Rd
@@ -6,11 +6,11 @@
Estimates the parameter of the zeta distribution.
}
\usage{
-zetaff(link = "loge", earg = list(), init.p = NULL)
+zetaff(link = "loge", init.p = NULL, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{link, earg, init.p}{
+ \item{link, init.p, zero}{
See \code{\link{CommonVGAMffArguments}} for more information.
These arguments apply to the (positive) parameter \eqn{p}.
See \code{\link{Links}} for more choices.
@@ -42,6 +42,9 @@ convergence. If convergence is not obtained, try several values
ranging from values near 0 to values about 10 or more.
+Multiple responses are handled.
+
+
}
\value{
@@ -49,6 +52,7 @@ ranging from values near 0 to values about 10 or more.
The object is used by modelling functions such as \code{\link{vglm}},
and \code{\link{vgam}}.
+
}
%Lindsey, J. K. (1995)
@@ -73,8 +77,9 @@ Boca Raton: Chapman & Hall/CRC Press.
}
\author{ T. W. Yee }
\note{
- The \code{\link{zeta}} function may be used to compute values of the
- zeta function.
+ The \code{\link{zeta}} function may be used to compute values
+ of the zeta function.
+
}
@@ -84,11 +89,12 @@ Boca Raton: Chapman & Hall/CRC Press.
\code{\link{hzeta}},
\code{\link{zipf}}.
+
}
\examples{
-zdata = data.frame(y = 1:5, w = c(63, 14, 5, 1, 2)) # Knight, p.304
-fit = vglm(y ~ 1, zetaff, zdata, trace = TRUE, weight = w, crit = "c")
-(phat = Coef(fit)) # 1.682557
+zdata <- data.frame(y = 1:5, w = c(63, 14, 5, 1, 2)) # Knight, p.304
+fit <- vglm(y ~ 1, zetaff, zdata, trace = TRUE, weight = w, crit = "coef")
+(phat <- Coef(fit)) # 1.682557
with(zdata, cbind(round(dzeta(y, phat) * sum(w), 1), w))
with(zdata, weighted.mean(y, w))
@@ -100,3 +106,4 @@ with(zdata, mean(log(rep(y, w))) + zeta(1+phat, deriv = 1) / zeta(1+phat))
}
\keyword{models}
\keyword{regression}
+% Also known as the Joos model or discrete Pareto distribution.
diff --git a/man/zibinomUC.Rd b/man/zibinomUC.Rd
index bdfd276..4bdabe0 100644
--- a/man/zibinomUC.Rd
+++ b/man/zibinomUC.Rd
@@ -11,6 +11,7 @@
generation for the zero-inflated binomial distribution with
parameter \code{pstr0}.
+
}
\usage{
dzibinom(x, size, prob, pstr0 = 0, log = FALSE)
@@ -74,17 +75,18 @@ rzibinom(n, size, prob, pstr0 = 0)
\code{\link{zibinomial}},
\code{\link[stats:Binomial]{dbinom}}.
+
}
\examples{
-prob = 0.2; size = 10; pstr0 = 0.5
-(ii = dzibinom(0:size, size, prob, pstr0 = pstr0))
-max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0))) # Should be 0
+prob <- 0.2; size <- 10; pstr0 <- 0.5
+(ii <- dzibinom(0:size, size, prob, pstr0 = pstr0))
+max(abs(cumsum(ii) - pzibinom(0:size, size, prob, pstr0 = pstr0))) # Should be 0
table(rzibinom(100, size, prob, pstr0 = pstr0))
table(qzibinom(runif(100), size, prob, pstr0 = pstr0))
round(dzibinom(0:10, size, prob, pstr0 = pstr0) * 100) # Should be similar
-\dontrun{ x = 0:size
+\dontrun{ x <- 0:size
barplot(rbind(dzibinom(x, size, prob, pstr0 = pstr0),
dbinom(x, size, prob)),
beside = TRUE, col = c("blue", "green"), ylab = "Probability",
diff --git a/man/zibinomial.Rd b/man/zibinomial.Rd
index f10bd40..4cd7b76 100644
--- a/man/zibinomial.Rd
+++ b/man/zibinomial.Rd
@@ -9,7 +9,6 @@
}
\usage{
zibinomial(lpstr0 = "logit", lprob = "logit",
- epstr0 = list(), eprob = list(),
ipstr0 = NULL, zero = 1, mv = FALSE, imethod = 1)
}
%- maybe also 'usage' for other objects documented here.
@@ -21,11 +20,12 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
For the zero-\emph{deflated} model see below.
}
- \item{epstr0, eprob}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{epstr0, eprob}{
+% epstr0 = list(), eprob = list(),
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
\item{ipstr0}{
Optional initial values for \eqn{\phi}{phi}, whose values must lie
@@ -117,6 +117,7 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
Half-stepping is not uncommon.
If failure to converge occurs, make use of the argument \code{ipstr0}.
+
}
\seealso{
@@ -125,17 +126,18 @@ zibinomial(lpstr0 = "logit", lprob = "logit",
\code{\link{posbinomial}},
\code{\link[stats:Binomial]{rbinom}}.
+
}
\examples{
-size = 10 # Number of trials; N in the notation above
-nn = 200
-zibdata = data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
- mubin = logit(-1, inverse = TRUE), # Mean of usual binomial
- sv = rep(size, length = nn))
-zibdata = transform(zibdata,
- y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
+size <- 10 # Number of trials; N in the notation above
+nn <- 200
+zibdata <- data.frame(pstr0 = logit( 0, inverse = TRUE), # 0.50
+ mubin = logit(-1, inverse = TRUE), # Mean of usual binomial
+ sv = rep(size, length = nn))
+zibdata <- transform(zibdata,
+ y = rzibinom(nn, size = sv, prob = mubin, pstr0 = pstr0))
with(zibdata, table(y))
-fit = vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
+fit <- vglm(cbind(y, sv - y) ~ 1, zibinomial, zibdata, trace = TRUE)
coef(fit, matrix = TRUE)
Coef(fit) # Useful for intercept-only models
diff --git a/man/zigeomUC.Rd b/man/zigeomUC.Rd
index 4913b19..cb49cfd 100644
--- a/man/zigeomUC.Rd
+++ b/man/zigeomUC.Rd
@@ -10,6 +10,7 @@
Density, and random generation
for the zero-inflated geometric distribution with parameter \code{pstr0}.
+
}
\usage{
dzigeom(x, prob, pstr0 = 0, log = FALSE)
@@ -71,12 +72,12 @@ rzigeom(n, prob, pstr0 = 0)
}
\examples{
-prob = 0.5; pstr0 = 0.2; x = (-1):20
-(ii = dzigeom(x, prob, pstr0))
+prob <- 0.5; pstr0 <- 0.2; x <- (-1):20
+(ii <- dzigeom(x, prob, pstr0))
max(abs(cumsum(ii) - pzigeom(x, prob, pstr0))) # Should be 0
table(rzigeom(1000, prob, pstr0))
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
barplot(rbind(dzigeom(x, prob, pstr0), dgeom(x, prob)),
beside = TRUE, col = c("blue","orange"),
ylab = "P[Y = y]", xlab = "y", las = 1,
diff --git a/man/zigeometric.Rd b/man/zigeometric.Rd
index 09db7cc..b9a7e93 100644
--- a/man/zigeometric.Rd
+++ b/man/zigeometric.Rd
@@ -8,8 +8,7 @@
}
\usage{
-zigeometric(lprob = "logit", eprob = list(),
- lpstr0 = "logit", epstr0 = list(),
+zigeometric(lprob = "logit", lpstr0 = "logit",
iprob = NULL, ipstr0 = NULL,
imethod = 1, bias.red = 0.5, zero = 2)
}
@@ -24,11 +23,12 @@ zigeometric(lprob = "logit", eprob = list(),
For the zero-\emph{deflated} model see below.
}
- \item{eprob, epstr0}{
- List. Extra argument for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{eprob, epstr0}{ eprob = list(), epstr0 = list(),
+% List. Extra argument for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
\item{bias.red}{
A constant used in the initialization process of \code{pstr0}.
It should lie between 0 and 1, with 1 having no effect.
@@ -98,32 +98,33 @@ zigeometric(lprob = "logit", eprob = list(),
\code{\link{zageometric}},
\code{\link[stats]{rgeom}}.
+
}
\examples{
-gdata = data.frame(x2 = runif(nn <- 1000) - 0.5)
-gdata = transform(gdata, x3 = runif(nn) - 0.5,
- x4 = runif(nn) - 0.5)
-gdata = transform(gdata, eta1 = 1.0 - 1.0 * x2 + 2.0 * x3,
- eta2 = -1.0,
- eta3 = 0.5)
-gdata = transform(gdata, prob1 = logit(eta1, inverse = TRUE),
- prob2 = logit(eta2, inverse = TRUE),
- prob3 = logit(eta3, inverse = TRUE))
-gdata = transform(gdata, y1 = rzigeom(nn, prob1, pstr0 = prob3),
- y2 = rzigeom(nn, prob2, pstr0 = prob3),
- y3 = rzigeom(nn, prob2, pstr0 = prob3))
+gdata <- data.frame(x2 = runif(nn <- 1000) - 0.5)
+gdata <- transform(gdata, x3 = runif(nn) - 0.5,
+ x4 = runif(nn) - 0.5)
+gdata <- transform(gdata, eta1 = 1.0 - 1.0 * x2 + 2.0 * x3,
+ eta2 = -1.0,
+ eta3 = 0.5)
+gdata <- transform(gdata, prob1 = logit(eta1, inverse = TRUE),
+ prob2 = logit(eta2, inverse = TRUE),
+ prob3 = logit(eta3, inverse = TRUE))
+gdata <- transform(gdata, y1 = rzigeom(nn, prob1, pstr0 = prob3),
+ y2 = rzigeom(nn, prob2, pstr0 = prob3),
+ y3 = rzigeom(nn, prob2, pstr0 = prob3))
with(gdata, table(y1))
with(gdata, table(y2))
with(gdata, table(y3))
head(gdata)
-fit1 = vglm(y1 ~ x2 + x3 + x4, zigeometric, gdata, trace = TRUE)
+fit1 <- vglm(y1 ~ x2 + x3 + x4, zigeometric, gdata, trace = TRUE)
coef(fit1, matrix = TRUE)
-fit2 = vglm(y2 ~ 1, zigeometric, gdata, trace = TRUE)
+fit2 <- vglm(y2 ~ 1, zigeometric, gdata, trace = TRUE)
coef(fit2, matrix = TRUE)
-fit3 = vglm(y3 ~ 1, zigeometric, gdata, trace = TRUE)
+fit3 <- vglm(y3 ~ 1, zigeometric, gdata, trace = TRUE)
coef(fit3, matrix = TRUE)
summary(fit3)
}
diff --git a/man/zinegbinUC.Rd b/man/zinegbinUC.Rd
index 5f43bbd..1c3e37a 100644
--- a/man/zinegbinUC.Rd
+++ b/man/zinegbinUC.Rd
@@ -11,6 +11,7 @@
for the zero-inflated negative binomial distribution with
parameter \code{pstr0}.
+
}
\usage{
dzinegbin(x, size, prob = NULL, munb = NULL, pstr0 = 0, log = FALSE)
@@ -86,8 +87,8 @@ rzinegbin(n, size, prob = NULL, munb = NULL, pstr0 = 0)
}
\examples{
-munb = 3; pstr0 = 0.2; size = k = 10; x = 0:10
-(ii = dzinegbin(x, pstr0 = pstr0, mu = munb, size = k))
+munb <- 3; pstr0 <- 0.2; size <- k <- 10; x <- 0:10
+(ii <- dzinegbin(x, pstr0 = pstr0, mu = munb, size = k))
max(abs(cumsum(ii) - pzinegbin(x, pstr0 = pstr0, mu = munb, size = k))) # 0
table(rzinegbin(100, pstr0 = pstr0, mu = munb, size = k))
diff --git a/man/zinegbinomial.Rd b/man/zinegbinomial.Rd
index 4462e8f..6851b11 100644
--- a/man/zinegbinomial.Rd
+++ b/man/zinegbinomial.Rd
@@ -9,7 +9,6 @@
}
\usage{
zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
- epstr0 = list(), emunb = list(), esize = list(),
ipstr0 = NULL, isize = NULL, zero = c(-1, -3),
imethod = 1, shrinkage.init = 0.95, nsimEIM = 250)
}
@@ -23,11 +22,12 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
}
- \item{epstr0, emunb, esize}{
- List. Extra arguments for the respective links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{epstr0, emunb, esize}{
+% epstr0 = list(), emunb = list(), esize = list(),
+% List. Extra arguments for the respective links.
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
\item{ipstr0, isize}{
Optional initial values for \eqn{\phi}{pstr0} and \eqn{k}{k}.
@@ -125,6 +125,12 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
\code{isize}, and/or
\code{zero} if there are explanatory variables.
+
+ This \pkg{VGAM} family function is computationally expensive
+ and usually runs slowly;
+ setting \code{trace = TRUE} is useful for monitoring convergence.
+
+
}
\seealso{
@@ -133,9 +139,10 @@ zinegbinomial(lpstr0 = "logit", lmunb = "loge", lsize = "loge",
\code{\link[stats:Poisson]{rpois}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
-# Example 1
+\dontrun{ # Example 1
ndata <- data.frame(x2 = runif(nn <- 1000))
ndata <- transform(ndata, pstr0 = logit(-0.5 + 1 * x2, inverse = TRUE),
munb = exp( 3 + 1 * x2),
@@ -166,6 +173,7 @@ rrzinb <- rrvglm(y1 ~ x2 + x3, zinegbinomial(zero = NULL), ndata,
coef(rrzinb, matrix = TRUE)
Coef(rrzinb)
}
+}
\keyword{models}
\keyword{regression}
diff --git a/man/zipebcom.Rd b/man/zipebcom.Rd
index 6077913..4f720d5 100644
--- a/man/zipebcom.Rd
+++ b/man/zipebcom.Rd
@@ -12,16 +12,15 @@
}
\usage{
zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
- emu12 = list(), ephi12 = list(), eoratio = list(),
imu12 = NULL, iphi12 = NULL, ioratio = NULL,
zero = 2:3, tol = 0.001, addRidge = 0.001)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lmu12, emu12, imu12}{
+ \item{lmu12, imu12}{
Link function, extra argument and optional initial values for
the first (and second) marginal probabilities.
- Arguments \code{lmu12} and \code{emu12} should be left alone.
+ Argument \code{lmu12} should be left alone.
Argument \code{imu12} may be of length 2 (one element for each response).
}
@@ -46,11 +45,13 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
proportions of zeros in either response.
}
- \item{ephi12, eoratio}{
- List. Extra argument for each of the links.
- See \code{earg} in \code{\link{Links}} for general information.
- }
+% \item{ephi12, eoratio}{
+% List. Extra argument for each of the links.
+% emu12 = list(), ephi12 = list(), eoratio = list(),
+% See \code{earg} in \code{\link{Links}} for general information.
+% }
+
\item{zero}{
Which linear/additive predictor is modelled as an intercept only?
A \code{NULL} means none.
@@ -202,15 +203,16 @@ zipebcom(lmu12 = "cloglog", lphi12 = "logit", loratio = "loge",
\code{\link{cloglog}},
\code{\link{CommonVGAMffArguments}}.
+
}
\examples{
-mydat = data.frame(x = seq(0, 1, len=(nsites <- 2000)))
-mydat = transform(mydat, eta1 = -3 + 5 * x,
+mydat <- data.frame(x = seq(0, 1, len=(nsites <- 2000)))
+mydat <- transform(mydat, eta1 = -3 + 5 * x,
phi1 = logit(-1, inverse=TRUE),
oratio = exp(2))
-mydat = transform(mydat, mu12 = cloglog(eta1, inverse=TRUE) * (1-phi1))
-tmat = with(mydat, rbinom2.or(nsites, mu1=mu12, oratio=oratio, exch=TRUE))
-mydat = transform(mydat, ybin1 = tmat[,1], ybin2 = tmat[,2])
+mydat <- transform(mydat, mu12 = cloglog(eta1, inverse = TRUE) * (1-phi1))
+tmat <- with(mydat, rbinom2.or(nsites, mu1 = mu12, oratio = oratio, exch = TRUE))
+mydat <- transform(mydat, ybin1 = tmat[,1], ybin2 = tmat[,2])
with(mydat, table(ybin1,ybin2)) / nsites # For interest only
\dontrun{
@@ -224,12 +226,12 @@ plot(mu12 ~ x, data = mydat, col = "blue", type = "l", ylim = 0:1,
ylab = "Probability", main = "Marginal probability and phi")
with(mydat, abline(h = phi1[1], col = "red", lty = "dashed"))
-tmat2 = with(mydat, dbinom2.or(mu1 = mu12, oratio = oratio, exch = TRUE))
+tmat2 <- with(mydat, dbinom2.or(mu1 = mu12, oratio = oratio, exch = TRUE))
with(mydat, matplot(x, tmat2, col = 1:4, type = "l", ylim = 0:1,
ylab = "Probability", main = "Joint probabilities")) }
# Now fit the model to the data.
-fit = vglm(cbind(ybin1,ybin2) ~ x, fam = zipebcom, dat = mydat, trace = TRUE)
+fit <- vglm(cbind(ybin1, ybin2) ~ x, fam = zipebcom, dat = mydat, trace = TRUE)
coef(fit, matrix = TRUE)
summary(fit)
vcov(fit)
diff --git a/man/zipf.Rd b/man/zipf.Rd
index 8d4f4b3..aa01ce5 100644
--- a/man/zipf.Rd
+++ b/man/zipf.Rd
@@ -7,7 +7,7 @@
}
\usage{
-zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
+zipf(N = NULL, link = "loge", init.s = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
@@ -24,11 +24,6 @@ zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
See \code{\link{Links}} for more choices.
}
- \item{earg}{
- List. Extra argument for the link.
- See \code{earg} in \code{\link{Links}} for general information.
-
- }
\item{init.s}{
Optional initial value for the parameter \eqn{s}.
The default is to choose an initial value internally.
@@ -73,7 +68,7 @@ zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
Johnson N. L., Kemp, A. W. and Kotz S. (2005)
\emph{Univariate Discrete Distributions},
3rd edition,
- Hoboken, New Jersey: Wiley.
+ Hoboken, New Jersey, USA: Wiley.
}
\author{ T. W. Yee }
@@ -89,12 +84,12 @@ zipf(N = NULL, link = "loge", earg = list(), init.s = NULL)
}
\examples{
-zdata = data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
-fit = vglm(y ~ 1, zipf, zdata, trace = TRUE, weight = ofreq, crit = "coef")
-fit = vglm(y ~ 1, zipf(link = identity, init = 3.4), zdata,
- trace = TRUE, weight = ofreq)
+zdata <- data.frame(y = 1:5, ofreq = c(63, 14, 5, 1, 2))
+fit <- vglm(y ~ 1, zipf, zdata, trace = TRUE, weight = ofreq, crit = "coef")
+fit <- vglm(y ~ 1, zipf(link = identity, init = 3.4), zdata,
+ trace = TRUE, weight = ofreq)
fit at misc$N
-(shat = Coef(fit))
+(shat <- Coef(fit))
with(zdata, weighted.mean(y, ofreq))
fitted(fit, matrix = FALSE)
}
diff --git a/man/zipfUC.Rd b/man/zipfUC.Rd
index 8a595b5..ca311f3 100644
--- a/man/zipfUC.Rd
+++ b/man/zipfUC.Rd
@@ -52,12 +52,12 @@ pzipf(q, N, s)
}
\examples{
-N = 10; s = 0.5; y = 1:N
-proby = dzipf(y, N = N, s = s)
+N <- 10; s <- 0.5; y <- 1:N
+proby <- dzipf(y, N = N, s = s)
\dontrun{ plot(proby ~ y, type = "h", col = "blue", ylab = "Probability",
ylim = c(0, 0.2), main = paste("Zipf(N = ",N,", s = ",s,")", sep = ""),
lwd = 2, las = 1) }
-sum(proby) # Should be 1
+sum(proby) # Should be 1
max(abs(cumsum(proby) - pzipf(y, N = N, s = s))) # Should be 0
}
\keyword{distribution}
diff --git a/man/zipoisUC.Rd b/man/zipoisUC.Rd
index f4a7f1c..ee6b1c6 100644
--- a/man/zipoisUC.Rd
+++ b/man/zipoisUC.Rd
@@ -79,15 +79,15 @@ rzipois(n, lambda, pstr0 = 0)
}
\examples{
-lambda = 3; pstr0 = 0.2; x = (-1):7
-(ii = dzipois(x, lambda, pstr0 = pstr0))
+lambda <- 3; pstr0 <- 0.2; x <- (-1):7
+(ii <- dzipois(x, lambda, pstr0 = pstr0))
max(abs(cumsum(ii) - pzipois(x, lambda, pstr0 = pstr0))) # Should be 0
table(rzipois(100, lambda, pstr0 = pstr0))
table(qzipois(runif(100), lambda, pstr0))
round(dzipois(0:10, lambda, pstr0 = pstr0) * 100) # Should be similar
-\dontrun{ x = 0:10
+\dontrun{ x <- 0:10
par(mfrow = c(2, 1)) # Zero-inflated Poisson
barplot(rbind(dzipois(x, lambda, pstr0 = pstr0), dpois(x, lambda)),
beside = TRUE, col = c("blue","orange"),
@@ -95,8 +95,8 @@ barplot(rbind(dzipois(x, lambda, pstr0 = pstr0), dpois(x, lambda)),
" Poisson(", lambda, ") (orange)", sep = ""),
names.arg = as.character(x))
-deflat_limit = -1 / expm1(lambda) # Zero-deflated Poisson
-newpstr0 = round(deflat_limit / 1.5, 3)
+deflat_limit <- -1 / expm1(lambda) # Zero-deflated Poisson
+newpstr0 <- round(deflat_limit / 1.5, 3)
barplot(rbind(dzipois(x, lambda, pstr0 = newpstr0),
dpois(x, lambda)),
beside = TRUE, col = c("blue","orange"),
diff --git a/man/zipoisson.Rd b/man/zipoisson.Rd
index 2a24753..c0e9ff8 100644
--- a/man/zipoisson.Rd
+++ b/man/zipoisson.Rd
@@ -10,22 +10,19 @@
}
\usage{
zipoissonff(llambda = "loge", lprobp = "logit",
- elambda = list(), eprobp = list(),
- ilambda = NULL, iprobp = NULL, imethod = 1,
- shrinkage.init = 0.8, zero = -2)
+ ilambda = NULL, iprobp = NULL,
+ imethod = 1, shrinkage.init = 0.8, zero = -2)
zipoisson(lpstr0 = "logit", llambda = "loge",
- epstr0 = list(), elambda = list(),
- ipstr0 = NULL, ilambda = NULL, imethod = 1,
- shrinkage.init = 0.8, zero = NULL)
+ ipstr0 = NULL, ilambda = NULL,
+ imethod = 1, shrinkage.init = 0.8, zero = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
- \item{lpstr0, llambda, epstr0, elambda}{
- Link function and extra argument for the parameter \eqn{\phi}{phi}
+ \item{lpstr0, llambda}{
+ Link function for the parameter \eqn{\phi}{phi}
and the usual \eqn{\lambda}{lambda} parameter.
- See \code{\link{Links}} for more choices,
- and \code{earg} in \code{\link{Links}} for general information.
- See \code{\link{CommonVGAMffArguments}} for more information.
+ See \code{\link{Links}} for more choices;
+ see \code{\link{CommonVGAMffArguments}} for more information.
For the zero-\emph{deflated} model see below.
@@ -40,7 +37,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
If a vector then recycling is used.
}
- \item{lprobp, eprobp, iprobp}{
+ \item{lprobp, iprobp}{
Corresponding arguments for the other parameterization.
See details below.
@@ -54,6 +51,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
and/or else specify a value for \code{ipstr0}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{shrinkage.init}{
How much shrinkage is used when initializing \eqn{\lambda}{lambda}.
@@ -63,6 +61,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
This argument is used in conjunction with \code{imethod}.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
\item{zero}{
An integer specifying which linear/additive predictor is modelled as
@@ -71,6 +70,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
a single parameter.
See \code{\link{CommonVGAMffArguments}} for more information.
+
}
}
\details{
@@ -121,6 +121,7 @@ zipoisson(lpstr0 = "logit", llambda = "loge",
}
\references{
+
Thas, O. and Rayner, J. C. W. (2005)
Smooth tests for the zero-inflated Poisson distribution.
\emph{Biometrics},
@@ -225,16 +226,16 @@ with(zdata, table(y1)) # Eyeball the data
with(zdata, table(y2))
fit1 <- vglm(y1 ~ x2, zipoisson(zero = 1), zdata, crit = "coef")
fit2 <- vglm(y2 ~ x2, zipoisson(zero = 1), zdata, crit = "coef")
-coef(fit1, matrix = TRUE) # These should agree with the above values
-coef(fit2, matrix = TRUE) # These should agree with the above values
+coef(fit1, matrix = TRUE) # These should agree with the above values
+coef(fit2, matrix = TRUE) # These should agree with the above values
# Fit all two simultaneously, using a different parameterization:
fit12 <- vglm(cbind(y1, y2) ~ x2, zipoissonff, zdata, crit = "coef")
-coef(fit12, matrix = TRUE) # These should agree with the above values
+coef(fit12, matrix = TRUE) # These should agree with the above values
# Example 2: McKendrick (1926). Data from 223 Indian village households
cholera <- data.frame(ncases = 0:4, # Number of cholera cases,
- wfreq = c(168, 32, 16, 6, 1)) # Frequencies
+ wfreq = c(168, 32, 16, 6, 1)) # Frequencies
fit <- vglm(ncases ~ 1, zipoisson, wei = wfreq, cholera, trace = TRUE)
coef(fit, matrix = TRUE)
with(cholera, cbind(actual = wfreq,
@@ -266,8 +267,8 @@ fit3 <- vglm(y3 ~ 1, zipoisson(zero = -1, lpstr0 = identity),
zdata, trace = TRUE, crit = "coef")
coef(fit3, matrix = TRUE)
# Check how accurate it was:
-zdata[1, 'usepstr0'] # Answer
-coef(fit3)[1] # Estimate
+zdata[1, 'usepstr0'] # Answer
+coef(fit3)[1] # Estimate
Coef(fit3)
# Example 5: This RR-ZIP is known as a COZIGAM or COZIVGLM-ZIP
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-vgam.git
More information about the debian-science-commits
mailing list